xref: /openbmc/linux/drivers/atm/iphase.c (revision ed84ef1c)
1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards
3                     Author: Peter Wang  <pwang@iphase.com>
4 		   Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>
6                                Version: 1.0
7 *******************************************************************************
8 
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13 
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18 
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20       was originally written by Monalisa Agrawal at UNH. Now this driver
21       supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23       in terms of PHY type, the size of control memory and the size of
24       packet memory. The following are the change log and history:
25 
26           Bugfix the Mona's UBR driver.
27           Modify the basic memory allocation and dma logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and added the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32 	  Add the flow control logic to the driver to allow rate-limit VC.
33           Add 4K VC support to the board with 512K control memory.
34           Add the support of all the variants of the Interphase ATM PCI
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38 
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40 
41 *******************************************************************************/
42 
43 #include <linux/module.h>
44 #include <linux/kernel.h>
45 #include <linux/mm.h>
46 #include <linux/pci.h>
47 #include <linux/errno.h>
48 #include <linux/atm.h>
49 #include <linux/atmdev.h>
50 #include <linux/ctype.h>
51 #include <linux/sonet.h>
52 #include <linux/skbuff.h>
53 #include <linux/time.h>
54 #include <linux/delay.h>
55 #include <linux/uio.h>
56 #include <linux/init.h>
57 #include <linux/interrupt.h>
58 #include <linux/wait.h>
59 #include <linux/slab.h>
60 #include <asm/io.h>
61 #include <linux/atomic.h>
62 #include <linux/uaccess.h>
63 #include <asm/string.h>
64 #include <asm/byteorder.h>
65 #include <linux/vmalloc.h>
66 #include <linux/jiffies.h>
67 #include <linux/nospec.h>
68 #include "iphase.h"
69 #include "suni.h"
/*
 * Swap the two bytes of a 16-bit value.  Every use of the argument is
 * parenthesized so that compound expressions such as
 * swap_byte_order(a | b) expand correctly; the unparenthesized form
 * bound '&' before '|' and produced the wrong result.
 */
#define swap_byte_order(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
71 
72 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
73 
74 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
75 static void desc_dbg(IADEV *iadev);
76 
77 static IADEV *ia_dev[8];
78 static struct atm_dev *_ia_dev[8];
79 static int iadev_count;
80 static void ia_led_timer(struct timer_list *unused);
81 static DEFINE_TIMER(ia_timer, ia_led_timer);
82 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
83 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
84 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
85             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
86 
87 module_param(IA_TX_BUF, int, 0);
88 module_param(IA_TX_BUF_SZ, int, 0);
89 module_param(IA_RX_BUF, int, 0);
90 module_param(IA_RX_BUF_SZ, int, 0);
91 module_param(IADebugFlag, uint, 0644);
92 
93 MODULE_LICENSE("GPL");
94 
95 /**************************** IA_LIB **********************************/
96 
97 static void ia_init_rtn_q (IARTN_Q *que)
98 {
99    que->next = NULL;
100    que->tail = NULL;
101 }
102 
103 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
104 {
105    data->next = NULL;
106    if (que->next == NULL)
107       que->next = que->tail = data;
108    else {
109       data->next = que->next;
110       que->next = data;
111    }
112    return;
113 }
114 
115 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
116    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
117    if (!entry)
118       return -ENOMEM;
119    entry->data = data;
120    entry->next = NULL;
121    if (que->next == NULL)
122       que->next = que->tail = entry;
123    else {
124       que->tail->next = entry;
125       que->tail = que->tail->next;
126    }
127    return 1;
128 }
129 
130 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
131    IARTN_Q *tmpdata;
132    if (que->next == NULL)
133       return NULL;
134    tmpdata = que->next;
135    if ( que->next == que->tail)
136       que->next = que->tail = NULL;
137    else
138       que->next = que->next->next;
139    return tmpdata;
140 }
141 
142 static void ia_hack_tcq(IADEV *dev) {
143 
144   u_short 		desc1;
145   u_short		tcq_wr;
146   struct ia_vcc         *iavcc_r = NULL;
147 
148   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
149   while (dev->host_tcq_wr != tcq_wr) {
150      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
151      if (!desc1) ;
152      else if (!dev->desc_tbl[desc1 -1].timestamp) {
153         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
154         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
155      }
156      else if (dev->desc_tbl[desc1 -1].timestamp) {
157         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
158            printk("IA: Fatal err in get_desc\n");
159            continue;
160         }
161         iavcc_r->vc_desc_cnt--;
162         dev->desc_tbl[desc1 -1].timestamp = 0;
163         IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
164                                    dev->desc_tbl[desc1 -1].txskb, desc1);)
165         if (iavcc_r->pcr < dev->rate_limit) {
166            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
167            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
168               printk("ia_hack_tcq: No memory available\n");
169         }
170         dev->desc_tbl[desc1 -1].iavcc = NULL;
171         dev->desc_tbl[desc1 -1].txskb = NULL;
172      }
173      dev->host_tcq_wr += 2;
174      if (dev->host_tcq_wr > dev->ffL.tcq_ed)
175         dev->host_tcq_wr = dev->ffL.tcq_st;
176   }
177 } /* ia_hack_tcq */
178 
/*
 * Allocate a free 1-based transmit descriptor number for @iavcc, or
 * return 0xFFFF when none is available.
 *
 * First reclaims completed descriptors via ia_hack_tcq().  At most once
 * every 50 jiffies (or whenever the TCQ looks empty) it also scans the
 * whole descriptor table for entries older than the owning VC's
 * ltimeout and force-recycles them by pushing their numbers back into
 * the TCQ.  NOTE: the static `timer' makes this function unsafe to run
 * concurrently for two adapters - presumably serialized by the caller;
 * confirm against the locking in ia_pkt_tx().
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short 		desc_num, i;
  struct sk_buff        *skb;
  struct ia_vcc         *iavcc_r = NULL;
  unsigned long delta;
  static unsigned long timer = 0;   /* jiffies of the last recovery scan */
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
           i++;
           continue;
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* Back the TCQ read pointer up one slot (wrapping at the
            * start) and stuff the stale descriptor number there so it
            * is handed out again immediately. */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;   /* TCQ empty: no free descriptor */

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* Skip zero slots and descriptors that are still in flight
   * (nonzero timestamp), wrapping around the TCQ as needed. */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
	dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
236 
/*
 * Hardware workaround: detect and clear a locked-up ABR VC.
 *
 * Called periodically; only acts on ABR VCs, and only checks for a
 * lockup on every 5th invocation (vcstatus->cnt == 5).  A VC is deemed
 * locked up when it has a pending descriptor but either (a) stays in
 * ABR_STATE across a 10us wait, or (b) its cell slot and fraction
 * registers have not moved since the previous check.  Recovery forces
 * the VC state machine to idle and re-inserts the VCI into the first
 * free slot of the ABR schedule table.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char          	foundLockUp;
  vcstatus_t		*vcstatus;
  u_short               *shd_tbl;
  u_short               tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int  i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     if( vcstatus->cnt == 0x05 ) {   /* only check every 5th call */
        abr_vc += vcc->vci;
	eabr_vc += vcc->vci;
	if( eabr_vc->last_desc ) {   /* a descriptor is still pending */
	   if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
              /* still pending and still in ABR_STATE -> locked up */
	      if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
		 foundLockUp = 1;
           }
	   else {
              /* Not in ABR_STATE: locked up if the VC made no progress
               * since the last check (slot and fraction unchanged). */
	      tempCellSlot = abr_vc->last_cell_slot;
              tempFract    = abr_vc->fraction;
              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
	         foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
	   }
        } /* last descriptor */
        vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
        /* Halt the segmentation engine before touching VC state. */
	writew(0xFFFD, dev->seg_reg+MODE_REG_0);
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
        /* Re-insert the VCI into the first empty ABR schedule slot. */
	shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
	for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
	if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
	vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}
296 
297 /*
298 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
299 **
300 **  +----+----+------------------+-------------------------------+
301 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
302 **  +----+----+------------------+-------------------------------+
303 **
304 **    R = reserved (written as 0)
305 **    NZ = 0 if 0 cells/sec; 1 otherwise
306 **
307 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
308 */
309 static u16
310 cellrate_to_float(u32 cr)
311 {
312 
313 #define	NZ 		0x4000
314 #define	M_BITS		9		/* Number of bits in mantissa */
315 #define	E_BITS		5		/* Number of bits in exponent */
316 #define	M_MASK		0x1ff
317 #define	E_MASK		0x1f
318   u16   flot;
319   u32	tmp = cr & 0x00ffffff;
320   int 	i   = 0;
321   if (cr == 0)
322      return 0;
323   while (tmp != 1) {
324      tmp >>= 1;
325      i++;
326   }
327   if (i == M_BITS)
328      flot = NZ | (i << M_BITS) | (cr & M_MASK);
329   else if (i < M_BITS)
330      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
331   else
332      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
333   return flot;
334 }
335 
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
*/
/* Currently compiled out: unused inverse of cellrate_to_float(). */
static u32
float_to_cellrate(u16 rate)
{
  u32   exp, mantissa, cps;
  if ((rate & NZ) == 0)
     return 0;
  exp = (rate >> M_BITS) & E_MASK;
  mantissa = rate & M_MASK;
  if (exp == 0)
     return 1;
  cps = (1 << M_BITS) | mantissa;
  if (exp == M_BITS)
     cps = cps;
  else if (exp > M_BITS)
     cps <<= (exp - M_BITS);
  else
     cps >>= (M_BITS - exp);
  return cps;
}
#endif
360 
361 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
362   srv_p->class_type = ATM_ABR;
363   srv_p->pcr        = dev->LineRate;
364   srv_p->mcr        = 0;
365   srv_p->icr        = 0x055cb7;
366   srv_p->tbe        = 0xffffff;
367   srv_p->frtt       = 0x3a;
368   srv_p->rif        = 0xf;
369   srv_p->rdf        = 0xb;
370   srv_p->nrm        = 0x4;
371   srv_p->trm        = 0x7;
372   srv_p->cdf        = 0x3;
373   srv_p->adtf       = 50;
374 }
375 
/*
 * Program the adapter's per-VC ABR tables for @vcc.
 *
 * @flag == 1 initializes the forward (FFRED / segmentation) VC entry in
 * MAIN_VC_TABLE_ADDR from the service parameters in @srv_p; @flag == 0
 * initializes the reverse (RFRED / reassembly) entry and the reassembly
 * lookup table, and accounts the VC in dev->sum_mcr / dev->n_abr.
 * Always returns 0 (the error codes below are compiled out).
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                                                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry  *f_abr_vc;
  r_vc_abr_entry  *r_abr_vc;
  u32		icr;
  u8		trm, nrm, crm;
  u16		adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
	  return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
	  return INVALID_MCR;
       if (!(srv_p->icr))
	  srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
	  return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
	  return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
	  return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
	  return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
	  return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
	  srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
	  return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
	  return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
	  return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
			          /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;   /* hardware fields must be nonzero */
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       /* ICR is capped by the rate implied by TBE over the fixed
        * round-trip time (frtt), computed without losing precision
        * whichever of the two is larger. */
       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
				((srv_p->tbe/srv_p->frtt)*1000000) :
				(1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;   /* start at the initial rate */
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       /* Mark the VCI as ABR in the reassembly lookup table. */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr	   += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return	0;
}
/*
 * Reserve CBR bandwidth for @vcc by spreading its schedule entries as
 * evenly as possible through the CBR schedule table.
 *
 * The number of entries is max_pcr divided by the table granularity
 * (rounded up when the remainder exceeds 1/4 of a granule).  Returns 0
 * on success, -1 if no PCR was given, -EBUSY if the table does not have
 * enough free entries.  Enables CBR in STPARMS when this is the first
 * CBR VC on the adapter.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16  cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   /* Round up when the requested rate is closer to the next granule
    * (remainder larger than a quarter granule). */
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be  smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          // Probe alternately below (idealSlot - inc) and above
          // (idealSlot + inc) the ideal slot, widening each iteration.
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
577 static void ia_cbrVc_close (struct atm_vcc *vcc) {
578    IADEV *iadev;
579    u16 *SchedTbl, NullVci = 0;
580    u32 i, NumFound;
581 
582    iadev = INPH_IA_DEV(vcc->dev);
583    iadev->NumEnabledCBR--;
584    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
585    if (iadev->NumEnabledCBR == 0) {
586       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
587       IF_CBR (printk("CBR support disabled\n");)
588    }
589    NumFound = 0;
590    for (i=0; i < iadev->CbrTotEntries; i++)
591    {
592       if (*SchedTbl == vcc->vci) {
593          iadev->CbrRemEntries++;
594          *SchedTbl = NullVci;
595          IF_CBR(NumFound++;)
596       }
597       SchedTbl++;
598    }
599    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
600 }
601 
602 static int ia_avail_descs(IADEV *iadev) {
603    int tmp = 0;
604    ia_hack_tcq(iadev);
605    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
606       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
607    else
608       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
609                    iadev->ffL.tcq_st) / 2;
610    return tmp;
611 }
612 
613 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
614 
615 static int ia_que_tx (IADEV *iadev) {
616    struct sk_buff *skb;
617    int num_desc;
618    struct atm_vcc *vcc;
619    num_desc = ia_avail_descs(iadev);
620 
621    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
622       if (!(vcc = ATM_SKB(skb)->vcc)) {
623          dev_kfree_skb_any(skb);
624          printk("ia_que_tx: Null vcc\n");
625          break;
626       }
627       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
628          dev_kfree_skb_any(skb);
629          printk("Free the SKB on closed vci %d \n", vcc->vci);
630          break;
631       }
632       if (ia_pkt_tx (vcc, skb)) {
633          skb_queue_head(&iadev->tx_backlog, skb);
634       }
635       num_desc--;
636    }
637    return 0;
638 }
639 
/*
 * Complete transmissions queued on tx_return_q by ia_hack_tcq().
 *
 * For each returned descriptor, pops skbs off the owning VC's
 * txing_skb list up to and including the completed skb, releasing each
 * one via vcc->pop() (when set and the skb has data) or
 * dev_kfree_skb_any().  If the completed skb is not found on the list
 * the descriptor is pushed back onto tx_return_q and processing stops.
 * Finally kicks the backlog via ia_que_tx().
 */
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q *  rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           printk("ia_tx_poll: skb is null\n");
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
	   goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
	   goto out;
       }

       /* Release every skb queued ahead of the completed one; any such
        * skb without IA_TX_DONE set was lost by the hardware. */
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Release the SKB not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          /* Completed skb missing from the list: requeue the entry and
           * retry on a later poll. */
          IF_EVENT(printk("IA: Vci %d - skb not found requeued\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
/* Currently compiled out: bit-bang write of a 16-bit word to the
 * NOVRAM/EEPROM at @addr, MSB first, via the NVRAM_* macros. */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
        u32	t;
	int	i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again
	 */
	NVRAM_CMD(EXTEND + EWDS)
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
737 
/*
 * Bit-bang read of the 16-bit EEPROM word at @addr.
 *
 * Issues the read command via NVRAM_CMD, then clocks the 16 data bits
 * in MSB first with NVRAM_CLKIN, and deselects the chip.  Returns the
 * assembled word.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
	u_short	val;
        u32	t;
	int	i;
	/*
	 * Read the first bit that was clocked with the falling edge of the
	 * the last command data clock
	 */
	NVRAM_CMD(IAREAD + addr);
	/*
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	 * and so on.
	 */
	val = 0;
	for (i=15; i>=0; i--) {
		NVRAM_CLKIN(t);
		val |= (t << i);
	}
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
	return val;
}
761 
/*
 * Determine the adapter variant from EEPROM word 25 and size the driver
 * accordingly.
 *
 * The memory-size bits select how many TX/RX descriptors fit (the
 * module-parameter defaults are scaled down for 512K and smaller
 * boards; explicit module parameters are taken as-is).  The front-end
 * bits select the PHY type, from which the usable ATM cell rate
 * (LineRate) is derived.
 */
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      /* Halve the default buffer counts on 512K boards. */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 2;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 2;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   else {
      /* Smallest memory option: an eighth of the defaults. */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 8;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 8;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   /* RX packet RAM starts right after the TX buffer area. */
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
     iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
   /* Line rate in cells/sec; the *26/(27*53) factor presumably accounts
    * for framing overhead and the 53-byte cell size - TODO confirm. */
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
       iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
822 
823 static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
824 {
825 	return readl(ia->phy + (reg >> 2));
826 }
827 
828 static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
829 {
830 	writel(val, ia->phy + (reg >> 2));
831 }
832 
/*
 * PHY (front end) interrupt handler: sample the status register of
 * whichever PHY type is fitted and update carrier_detect.
 *
 * For the DS3/E3 PHYs the interrupt indication register is read with
 * the result discarded - presumably read-to-clear; confirm against the
 * PM7345 datasheet.
 */
static void ia_frontend_intr(struct iadev_priv *iadev)
{
	u32 status;

	if (iadev->phy_type & FE_25MBIT_PHY) {
		status = ia_phy_read32(iadev, MB25_INTR_STATUS);
		iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
	} else if (iadev->phy_type & FE_DS3_PHY) {
		ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
		status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
		iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
	} else if (iadev->phy_type & FE_E3_PHY) {
		ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
		status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
		iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	} else {
		/* Default: OC3 SUNI - carrier is up unless loss-of-signal. */
		status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
		iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
	}

	printk(KERN_INFO "IA: SUNI carrier %s\n",
		iadev->carrier_detect ? "detected" : "lost signal");
}
856 
/* Initialize the 25.6 Mbit (MB25) PHY and sample the carrier state. */
static void ia_mb25_init(struct iadev_priv *iadev)
{
#if 0
   mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
#endif
	/* Unlike the #if 0 code above, MB25_MC_ENABLED is left clear here;
	 * the DRIC/DREC bit meanings are per the MB25 datasheet - confirm. */
	ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
	ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);

	/* Carrier is up when the Good Signal Bit is set. */
	iadev->carrier_detect =
		(ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
}
868 
/* Register/value pair for table-driven PHY setup (see ia_phy_write()). */
struct ia_reg {
	u16 reg;	/* PHY register offset */
	u16 val;	/* value to write */
};
873 
874 static void ia_phy_write(struct iadev_priv *iadev,
875 			 const struct ia_reg *regs, int len)
876 {
877 	while (len--) {
878 		ia_phy_write32(iadev, regs->reg, regs->val);
879 		regs++;
880 	}
881 }
882 
/*
 * ia_suni_pm7345_init_ds3 - DS3-specific setup of the PM7345 framer.
 *
 * Samples the current loss-of-signal state into iadev->carrier_detect,
 * then programs the DS3 framer/transmitter configuration registers.
 */
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_ds3_init[] = {
		{ SUNI_DS3_FRM_INTR_ENBL,	0x17 },
		{ SUNI_DS3_FRM_CFG,		0x01 },
		{ SUNI_DS3_TRAN_CFG,		0x01 },
		{ SUNI_CONFIG,			0 },
		{ SUNI_SPLR_CFG,		0 },
		{ SUNI_SPLT_CFG,		0 }
	};
	u32 status;

	/* LOSV set means loss of signal, i.e. no carrier. */
	status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
	iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

	ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
900 
/*
 * ia_suni_pm7345_init_e3 - E3-specific setup of the PM7345 framer.
 *
 * Samples the current loss-of-signal state into iadev->carrier_detect,
 * then programs the E3 framer registers and enables E3 mode in the
 * SUNI config register.
 */
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_e3_init[] = {
		{ SUNI_E3_FRM_FRAM_OPTIONS,		0x04 },
		{ SUNI_E3_FRM_MAINT_OPTIONS,		0x20 },
		{ SUNI_E3_FRM_FRAM_INTR_ENBL,		0x1d },
		{ SUNI_E3_FRM_MAINT_INTR_ENBL,		0x30 },
		{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS,	0 },
		{ SUNI_E3_TRAN_FRAM_OPTIONS,		0x01 },
		{ SUNI_CONFIG,				SUNI_PM7345_E3ENBL },
		{ SUNI_SPLR_CFG,			0x41 },
		{ SUNI_SPLT_CFG,			0x41 }
	};
	u32 status;

	/* LOS set means loss of signal, i.e. no carrier. */
	status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
	iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
920 
/*
 * ia_suni_pm7345_init - common PM7345 SUNI-PDH initialization.
 *
 * Runs the DS3- or E3-specific framer setup first, then programs the
 * common register set (interrupt enables, RXCP/TXCP idle and cell
 * header patterns/masks), and finally clears all loopback mode bits in
 * the SUNI config register.
 */
static void ia_suni_pm7345_init(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_init[] = {
		/* Enable RSOP loss of signal interrupt. */
		{ SUNI_INTR_ENBL,		0x28 },
		/* Clear error counters. */
		{ SUNI_ID_RESET,		0 },
		/* Clear "PMCTST" in master test register. */
		{ SUNI_MASTER_TEST,		0 },

		{ SUNI_RXCP_CTRL,		0x2c },
		{ SUNI_RXCP_FCTRL,		0x81 },

		{ SUNI_RXCP_IDLE_PAT_H1,	0 },
		{ SUNI_RXCP_IDLE_PAT_H2,	0 },
		{ SUNI_RXCP_IDLE_PAT_H3,	0 },
		{ SUNI_RXCP_IDLE_PAT_H4,	0x01 },

		{ SUNI_RXCP_IDLE_MASK_H1,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H2,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H3,	0xff },
		{ SUNI_RXCP_IDLE_MASK_H4,	0xfe },

		{ SUNI_RXCP_CELL_PAT_H1,	0 },
		{ SUNI_RXCP_CELL_PAT_H2,	0 },
		{ SUNI_RXCP_CELL_PAT_H3,	0 },
		{ SUNI_RXCP_CELL_PAT_H4,	0x01 },

		{ SUNI_RXCP_CELL_MASK_H1,	0xff },
		{ SUNI_RXCP_CELL_MASK_H2,	0xff },
		{ SUNI_RXCP_CELL_MASK_H3,	0xff },
		{ SUNI_RXCP_CELL_MASK_H4,	0xff },

		{ SUNI_TXCP_CTRL,		0xa4 },
		{ SUNI_TXCP_INTR_EN_STS,	0x10 },
		{ SUNI_TXCP_IDLE_PAT_H5,	0x55 }
	};

	/* PHY-variant specific framer setup must run before the common
	 * register table is applied. */
	if (iadev->phy_type & FE_DS3_PHY)
		ia_suni_pm7345_init_ds3(iadev);
	else
		ia_suni_pm7345_init_e3(iadev);

	ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));

	/* Clear every loopback bit (line/cell/diagnostic/PHY). */
	ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
		~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
		  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
#ifdef __SNMP__
   suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
#endif /* __SNMP__ */
   return;
}
973 }
974 
975 
976 /***************************** IA_LIB END *****************************/
977 
#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
/*
 * xdump - debug helper: hex/ASCII dump of a buffer, 16 bytes per line.
 * @cp:     bytes to dump
 * @length: number of bytes to dump
 * @prefix: string printed at the start of every output line
 *
 * Each line shows up to 16 bytes in hex, with an extra space between
 * every group of 4 bytes, followed by the printable-ASCII rendering
 * ('.' for non-printable bytes).
 *
 * Bugfix: in the end-of-buffer padding loop the group-separator space
 * was written with a bare sprintf() whose return value was discarded,
 * so the next sprintf() overwrote it and short final lines lost their
 * group alignment.  The return value is now accumulated into pBuf,
 * matching the hex loop above.
 */
static void xdump( u_char*  cp, int  length, char*  prefix )
{
    int col, count;
    u_char prntBuf[120];
    u_char*  pBuf = prntBuf;
    count = 0;
    while(count < length){
        pBuf += sprintf( pBuf, "%s", prefix );
        for(col = 0;count + col < length && col < 16; col++){
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
        }
        while(col++ < 16){      /* pad end of buffer with blanks */
            if ((col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "   " );
        }
        pBuf += sprintf( pBuf, "  " );
        for(col = 0;count + col < length && col < 16; col++){
		u_char c = cp[count + col];

		if (isascii(c) && isprint(c))
			pBuf += sprintf(pBuf, "%c", c);
		else
			pBuf += sprintf(pBuf, ".");
                }
        printk("%s\n", prntBuf);
        count += col;
        pBuf = prntBuf;
    }

}  /* close xdump(... */
#endif /* CONFIG_ATM_IA_DEBUG */
1014 
1015 
/* List of Interphase boards registered by this driver. */
static struct atm_dev *ia_boards = NULL;

/* Control-memory base addresses scale with the board's memory size
 * (iadev->mem), expressed in multiples of 128K. */
#define ACTUAL_RAM_BASE \
	RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1024 
1025 
1026 /*-- some utilities and memory allocation stuff will come here -------------*/
1027 
/*
 * desc_dbg - debug helper: dump the transmit complete queue (TCQ)
 * pointers and contents, plus the per-descriptor timestamp table.
 * Read-mostly; the only hardware accesses are register/RAM reads.
 */
static void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 i;
  void __iomem *tmp;
  // regval = readl((u32)ia_cmds->maddr);
  tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr,
                   iadev->ffL.tcq_rd);
  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  i = 0;
  /* Walk the TCQ from start to end, one u16 descriptor per slot. */
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1052 
1053 
1054 /*----------------------------- Receiving side stuff --------------------------*/
1055 
/*
 * rx_excp_rcvd - drain the reassembler exception queue.
 *
 * The entire body is compiled out (#if 0): as the comment below notes,
 * servicing the queue this way caused too many exception interrupts,
 * so RX exceptions are currently acknowledged but otherwise ignored
 * (see the RX_EXCP_RCVD branch in rx_intr()).
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
	vci = readw(iadev->reass_ram+excpq_rd_ptr);
	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
	excpq_rd_ptr += 4;
	if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
 	    excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
	writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1084 
1085 static void free_desc(struct atm_dev *dev, int desc)
1086 {
1087 	IADEV *iadev;
1088 	iadev = INPH_IA_DEV(dev);
1089         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1090 	iadev->rfL.fdq_wr +=2;
1091 	if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1092 		iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;
1093 	writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1094 }
1095 
1096 
/*
 * rx_pkt - process one entry of the packet complete queue (PCQ).
 *
 * Pops a descriptor number off the PCQ, validates it and its VCC,
 * allocates an skb for the PDU, and queues a DLE so the board DMAs the
 * data from packet memory into the skb.  The skb is parked on
 * rx_dma_q until rx_dle_intr() sees the DMA complete.
 *
 * Returns 0 on success, -EINVAL if the PCQ was empty, -1 on a bad
 * descriptor / missing VCC / error status (the descriptor is returned
 * to the free queue in the error paths).
 */
static int rx_pkt(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	struct dle* wr_ptr;
	int len;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;

	iadev = INPH_IA_DEV(dev);
	/* Cached read pointer equal to the hardware write pointer means
	 * the PCQ is empty. */
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	{
   	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
	    return -EINVAL;
	}
	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
        IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                                    iadev->reass_ram, iadev->rfL.pcq_rd, desc);
              printk(" pcq_wr_ptr = 0x%x\n",
                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer  - maybe we shud do this in the end*/
	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
		iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	else
		iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
		update stuff. - doesn't seem to be any update necessary
	*/
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
	/* Valid descriptors are 1..num_rx_desc and must map to a VC we
	 * know about; anything else is recycled immediately. */
        if (!desc || (desc > iadev->num_rx_desc) ||
                      ((buf_desc_ptr->vc_index & 0xffff) >= iadev->num_vc)) {
            free_desc(dev, desc);
            IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
            return -1;
        }
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	if (!vcc)
	{
                free_desc(dev, desc);
		printk("IA: null vcc, drop PDU\n");
		return -1;
	}


	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	{
                atomic_inc(&vcc->stats->rx_err);
		IF_ERR(printk("IA: bad packet, dropping it");)
                if (status & RX_CER) {
                    IF_ERR(printk(" cause: packet CRC error\n");)
                }
                else if (status & RX_PTE) {
                    IF_ERR(printk(" cause: packet time out\n");)
                }
                else {
                    IF_ERR(printk(" cause: buffer overflow\n");)
                }
		goto out_free_desc;
	}

	/*
		build DLE.
	*/

	/* PDU length is the distance from the buffer start to the
	 * hardware DMA pointer recorded in the descriptor. */
	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	len = dma_addr - buf_addr;
        if (len > iadev->rx_buf_sz) {
           printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
           atomic_inc(&vcc->stats->rx_err);
	   goto out_free_desc;
        }

        if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
           if (vcc->vci < 32)
              printk("Drop control packets\n");
	   goto out_free_desc;
        }
	skb_put(skb,len);
        // pwang_test
        ATM_SKB(skb)->vcc = vcc;
        ATM_DESC(skb) = desc;
	/* rx_dle_intr() will pick this up when the DMA completes. */
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      len, DMA_FROM_DEVICE);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len;	/* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* shud take care of wrap around here too. */
        if(++wr_ptr == iadev->rx_dle_q.end)
             wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;
	udelay(1);
	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:	return 0;
out_free_desc:
        free_desc(dev, desc);
        goto out;
}
1211 
/*
 * rx_intr - reassembly (RX) interrupt handler.
 *
 * RX_PKT_RCVD:   drain the packet complete queue via rx_pkt().
 * RX_FREEQ_EMPT: free-descriptor queue exhausted; if no packets have
 *                completed for a while, forcibly recycle every
 *                descriptor (the "test logic" workaround below).
 * RX_EXCP_RCVD:  exception queue entry (handler currently a no-op).
 * RX_RAW_RCVD:   raw cell received; only logged.
 */
static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
	/* do something */
	/* Basically recvd an interrupt for receiving a packet.
	A descriptor would have been written to the packet complete
	queue. Get all the descriptors and set up dma to move the
	packets till the packet complete queue is empty..
	*/
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
	while(!(state & PCQ_EMPTY))
	{
             rx_pkt(dev);
	     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	}
        iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     if (iadev->rxing) {
        /* First time the free queue runs dry: snapshot the packet
         * count and time so progress can be checked later. */
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        /* No RX progress for ~50 jiffies: recycle all descriptors to
         * unwedge the receiver. */
        for (i = 1; i <= iadev->num_rx_desc; i++)
               free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
	/* probably need to handle the exception queue also. */
	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
	rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
	/* need to handle the raw incoming cells. This deepnds on
	whether we have programmed to receive the raw cells or not.
	Else ignore. */
	IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)
  }
}
1272 
1273 
/*
 * rx_dle_intr - RX DMA-complete interrupt handler.
 *
 * Walks the RX DLE ring from our cached read pointer up to the
 * hardware's current position; for each completed DLE, dequeues the
 * matching skb from rx_dma_q, frees its descriptor, validates the
 * AAL5 trailer length, and pushes the trimmed skb up to the VCC.
 * Finally re-enables RX interrupts if they were masked because the
 * free-descriptor queue had run dry.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
	- do we really need to do this. Think not. */
  /* DMA is done, just get all the recevie buffers from the rx dma queue
	and push them up to the higher layer protocol. Also free the desc
	associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
	  dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

	  /* NOTE(review): unmaps via rx_dle_q.write->sys_pkt_addr, not
	   * dle->sys_pkt_addr — looks suspicious but is preserved as-is;
	   * confirm which DLE entry holds the mapping for this skb. */
	  dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
			   len, DMA_FROM_DEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
	  if (!vcc) {
	      printk("IA: null vcc\n");
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
          }
          ia_vcc = INPH_IA_VCC(vcc);
          if (ia_vcc == NULL)
          {
             atomic_inc(&vcc->stats->rx_err);
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
           }
          // get real pkt length  pwang_test
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
	  length = swap_byte_order(trailer->length);
	  /* Sanity-check the AAL5 trailer length against both the
	   * buffer size and the actual received data. */
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
          }
          skb_trim(skb, length);

	  /* Display the packet */
	  IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

	  IF_RX(printk("rx_dle_intr: skb push");)
	  vcc->push(vcc,skb);
	  atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      if (++dle == iadev->rx_dle_q.end)
    	  dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
		unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                                      iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}
1375 
1376 
/*
 * open_rx - set up the receive side for a newly opened VCC.
 *
 * Marks the VCI valid in the hardware VC lookup table, initializes
 * either an ABR VC (via the shared ABR setup) or the reassembly-table
 * entry (UBR/CBR), and records the VCC in iadev->rx_open[] so
 * rx_pkt() can attach incoming PDUs to it.
 *
 * Returns 0 on success, -EINVAL for ABR on a 25 Mbit PHY (not
 * supported there).
 */
static int open_rx(struct atm_vcc *vcc)
{
	IADEV *iadev;
	u_short __iomem *vc_table;
	u_short __iomem *reass_ptr;
	IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

	if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);
        if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
           if (iadev->phy_type & FE_25MBIT_PHY) {
               printk("IA:  ABR not support\n");
               return -EINVAL;
           }
        }
	/* Make only this VCI in the vc table valid and let all
		others be invalid entries */
	vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
	vc_table += vcc->vci;
	/* mask the last 6 bits and OR it with 3 for 1K VCs */

        *vc_table = vcc->vci << 6;
	/* Also keep a list of open rx vcs so that we can attach them with
		incoming PDUs later. */
	if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
                                (vcc->qos.txtp.traffic_class == ATM_ABR))
	{
                srv_cls_param_t srv_p;
                init_abr_vc(iadev, &srv_p);
                ia_open_abr_vc(iadev, &srv_p, vcc, 0);
	}
       	else {  /* for UBR  later may need to add CBR logic */
        	reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
           	reass_ptr += vcc->vci;
           	*reass_ptr = NO_AAL5_PKT;
       	}

	/* Warn (but proceed) if the slot was already occupied. */
	if (iadev->rx_open[vcc->vci])
		printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
			vcc->dev->number, vcc->vci);
	iadev->rx_open[vcc->vci] = vcc;
	return 0;
}
1420 
/*
 * rx_init - one-time initialization of the receive (reassembly) side.
 *
 * Allocates the RX DLE ring in coherent DMA memory, resets the
 * reassembler, lays out the receive control memory (buffer descriptor
 * table, free queue, packet complete queue, exception queue,
 * reassembly table, VC table, ABR VC table — see the memory map
 * comment below), caches the queue pointers in iadev->rfL, programs
 * the remaining registers, allocates the rx_open[] VCC array, and
 * finally puts the reassembler online.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (DLE memory is
 * released on the later failure path).
 */
static int rx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	unsigned long rx_pkt_start = 0;
	void *dle_addr;
	struct abr_vc_table  *abr_vc_table;
	u16 *vc_table;
	u16 *reass_table;
	int i,j, vcsize_sel;
	u_short freeq_st_adr;
	u_short *freeq_start;

	iadev = INPH_IA_DEV(dev);
  //    spin_lock_init(&iadev->rx_lock);

	/* Allocate 4k bytes - more aligned than needed (4k boundary) */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->rx_dle_dma, GFP_KERNEL);
	if (!dle_addr)  {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->rx_dle_q.start = (struct dle *)dle_addr;
	iadev->rx_dle_q.read = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = iadev->rx_dle_q.start;
	iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
	/* the end of the dle q points to the entry after the last
	DLE that can be used. */

	/* write the upper 20 bits of the start address to rx list address register */
	/* We know this is 32bit bus addressed so the following is safe */
	writel(iadev->rx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_RX_LIST_ADDR);
	IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_TX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
	printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
                      iadev->dma+IPHASE5575_RX_LIST_ADDR,
                      readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)

	/* Mask everything, take the reassembler offline, and reset it
	 * before programming the control memory. */
	writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
	writew(0, iadev->reass_reg+MODE_REG);
	writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);

	/* Receive side control memory map
	   -------------------------------

		Buffer descr	0x0000 (736 - 23K)
		VP Table	0x5c00 (256 - 512)
		Except q	0x5e00 (128 - 512)
		Free buffer q	0x6000 (1K - 2K)
		Packet comp q	0x6800 (1K - 2K)
		Reass Table	0x7000 (1K - 2K)
		VC Table	0x7800 (1K - 2K)
		ABR VC Table	0x8000 (1K - 32K)
	*/

	/* Base address for Buffer Descriptor Table */
	writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
	/* Set the buffer size register */
	writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);

	/* Initialize each entry in the Buffer Descriptor Table */
        iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	rx_pkt_start = iadev->rx_pkt_ram;
	/* Descriptor 0 (zeroed above) is never used; real descriptors
	 * run 1..num_rx_desc, each owning one rx_buf_sz buffer. */
	for(i=1; i<=iadev->num_rx_desc; i++)
	{
		memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		rx_pkt_start += iadev->rx_buf_sz;
	}
	IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
        i = FREE_BUF_DESC_Q*iadev->memSize;
	writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE);
        writew(i, iadev->reass_reg+FREEQ_ST_ADR);
        writew(i+iadev->num_rx_desc*sizeof(u_short),
                                         iadev->reass_reg+FREEQ_ED_ADR);
        writew(i, iadev->reass_reg+FREEQ_RD_PTR);
        writew(i+iadev->num_rx_desc*sizeof(u_short),
                                        iadev->reass_reg+FREEQ_WR_PTR);
	/* Fill the FREEQ with all the free descriptors. */
	freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
	freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
	for(i=1; i<=iadev->num_rx_desc; i++)
	{
		*freeq_start = (u_short)i;
		freeq_start++;
	}
	IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
        /* Packet Complete Queue */
        i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
        writew(i, iadev->reass_reg+PCQ_ST_ADR);
        writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
        writew(i, iadev->reass_reg+PCQ_RD_PTR);
        writew(i, iadev->reass_reg+PCQ_WR_PTR);

        /* Exception Queue */
        i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
        writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
        writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
                                             iadev->reass_reg+EXCP_Q_ED_ADR);
        writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
        writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);

    	/* Load local copy of FREEQ and PCQ ptrs */
        iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
       	iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
	iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
	iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
        iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
	iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
	iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
	iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;

        IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
              iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
              iadev->rfL.pcq_wr);)
	/* just for check - no VP TBL */
	/* VP Table */
	/* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
	/* initialize VP Table for invalid VPIs
		- I guess we can write all 1s or 0x000f in the entire memory
		  space or something similar.
	*/

	/* This seems to work and looks right to me too !!! */
        i =  REASS_TABLE * iadev->memSize;
	writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
 	/* initialize Reassembly table to I don't know what ???? */
	reass_table = (u16 *)(iadev->reass_ram+i);
        j = REASS_TABLE_SZ * iadev->memSize;
	for(i=0; i < j; i++)
		*reass_table++ = NO_AAL5_PKT;
       /* vcsize_sel encodes num_vc as log2(8192 / num_vc) for the
	* low bits of the VC lookup base register. */
       i = 8*1024;
       vcsize_sel =  0;
       while (i != iadev->num_vc) {
          i /= 2;
          vcsize_sel++;
       }
       i = RX_VC_TABLE * iadev->memSize;
       writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
       vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
        j = RX_VC_TABLE_SZ * iadev->memSize;
	for(i = 0; i < j; i++)
	{
		/* shift the reassembly pointer by 3 + lower 3 bits of
		vc_lkup_base register (=3 for 1K VCs) and the last byte
		is those low 3 bits.
		Shall program this later.
		*/
		*vc_table = (i << 6) | 15;	/* for invalid VCI */
		vc_table++;
	}
        /* ABR VC table */
        i =  ABR_VC_TABLE * iadev->memSize;
        writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);

        i = ABR_VC_TABLE * iadev->memSize;
	abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
	/* NOTE(review): the bound reuses REASS_TABLE_SZ rather than an
	 * ABR-table-specific size — looks suspicious but preserved;
	 * confirm the intended entry count. */
        j = REASS_TABLE_SZ * iadev->memSize;
        memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
    	for(i = 0; i < j; i++) {
		abr_vc_table->rdf = 0x0003;
             	abr_vc_table->air = 0x5eb1;
	       	abr_vc_table++;
        }

	/* Initialize other registers */

	/* VP Filter Register set for VC Reassembly only */
	writew(0xff00, iadev->reass_reg+VP_FILTER);
        writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
	writew(0x1,  iadev->reass_reg+PROTOCOL_ID);

	/* Packet Timeout Count  related Registers :
	   Set packet timeout to occur in about 3 seconds
	   Set Packet Aging Interval count register to overflow in about 4 us
 	*/
        writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );

        i = (j >> 6) & 0xFF;
        j += 2 * (j - 1);
        i |= ((j << 2) & 0xFF00);
        writew(i, iadev->reass_reg+TMOUT_RANGE);

        /* initiate the desc_tble */
        for(i=0; i<iadev->num_tx_desc;i++)
            iadev->desc_tbl[i].timestamp = 0;

	/* to clear the interrupt status register - read it */
	readw(iadev->reass_reg+REASS_INTR_STATUS_REG);

	/* Mask Register - clear it */
	writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);

	skb_queue_head_init(&iadev->rx_dma_q);
	iadev->rx_free_desc_qhead = NULL;

	iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
	if (!iadev->rx_open) {
		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
		dev->number);
		goto err_free_dle;
	}

        iadev->rxing = 1;
        iadev->rx_pkt_cnt = 0;
	/* Mode Register */
	writew(R_ONLINE, iadev->reass_reg+MODE_REG);
	return 0;

err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
			  iadev->rx_dle_dma);
err_out:
	return -ENOMEM;
}
1644 
1645 
1646 /*
1647 	The memory map suggested in appendix A and the coding for it.
1648 	Keeping it around just in case we change our mind later.
1649 
1650 		Buffer descr	0x0000 (128 - 4K)
1651 		UBR sched	0x1000 (1K - 4K)
1652 		UBR Wait q	0x2000 (1K - 4K)
		Common queues	0x3000 Packet Ready, Transmit comp(0x3100)
1654 					(128 - 256) each
1655 		extended VC	0x4000 (1K - 8K)
1656 		ABR sched	0x6000	and ABR wait queue (1K - 2K) each
1657 		CBR sched	0x7000 (as needed)
1658 		VC table	0x8000 (1K - 32K)
1659 */
1660 
/*
 * tx_intr - segmentation (TX) interrupt handler.
 *
 * TRANSMIT_DONE: runs the TX completion poll under tx_lock, acks the
 * interrupt, and wakes anyone waiting in close for TX to drain.
 * TCQ_NOT_EMPTY: only logged; no processing required here.
 */
static void tx_intr(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned short status;
        unsigned long flags;

	iadev = INPH_IA_DEV(dev);

	status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
        if (status & TRANSMIT_DONE){

           IF_EVENT(printk("Transmit Done Intr logic run\n");)
           spin_lock_irqsave(&iadev->tx_lock, flags);
           ia_tx_poll(iadev);
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
           /* ack the interrupt only after the poll has run */
           writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
           if (iadev->close_pending)
               wake_up(&iadev->close_wait);
        }
	if (status & TCQ_NOT_EMPTY)
	{
	    IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
	}
}
1685 
/*
 * tx_dle_intr - TX DMA-complete interrupt handler.
 *
 * Walks the TX DLE ring from our cached read pointer up to the
 * hardware's current position, dequeueing the matching skb for each
 * completed transfer. Unmapped/popped skbs are either returned to the
 * owner (pop) or freed; skbs on rate-limited VCs are instead marked
 * IA_DLED and parked on the VC's txing_skb list for the flow-control
 * logic. All ring bookkeeping happens under tx_lock.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
        IADEV *iadev;
        struct dle *dle, *cur_dle;
        struct sk_buff *skb;
        struct atm_vcc *vcc;
        struct ia_vcc  *iavcc;
        u_int dle_lp;
        unsigned long flags;

        iadev = INPH_IA_DEV(dev);
        spin_lock_irqsave(&iadev->tx_lock, flags);
        dle = iadev->tx_dle_q.read;
        dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
        cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
        while (dle != cur_dle)
        {
            /* free the DMAed skb */
            skb = skb_dequeue(&iadev->tx_dma_q);
            if (!skb) break;

	    /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
	    if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
		dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
				 DMA_TO_DEVICE);
	    }
            vcc = ATM_SKB(skb)->vcc;
            if (!vcc) {
                  printk("tx_dle_intr: vcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);

                  return;
            }
            iavcc = INPH_IA_VCC(vcc);
            if (!iavcc) {
                  printk("tx_dle_intr: iavcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);
                  return;
            }
            if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
               if ((vcc->pop) && (skb->len != 0))
               {
                 vcc->pop(vcc, skb);
               }
               else {
                 dev_kfree_skb_any(skb);
               }
            }
            else { /* Hold the rate-limited skb for flow control */
               IA_SKB_STATE(skb) |= IA_DLED;
               skb_queue_tail(&iavcc->txing_skb, skb);
            }
            IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
            if (++dle == iadev->tx_dle_q.end)
                 dle = iadev->tx_dle_q.start;
        }
        iadev->tx_dle_q.read = dle;
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1748 
/*
 * open_tx() - configure the transmit side of a VCC at open time.
 *
 * Validates the requested traffic class against the PHY, computes the
 * effective PCR and the flow-control poll timeout, then programs the
 * main and extended VC table entries for UBR, ABR or CBR service.
 * Returns 0 on success or a negative errno on failure.
 */
static int open_tx(struct atm_vcc *vcc)
{
	struct ia_vcc *ia_vcc;
	IADEV *iadev;
	struct main_vc *vc;
	struct ext_vc *evc;
        int ret;
	IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);

	/* The 25 Mbit PHY supports neither ABR nor CBR. */
        if (iadev->phy_type & FE_25MBIT_PHY) {
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
               printk("IA:  ABR not support\n");
               return -EINVAL;
           }
	  if (vcc->qos.txtp.traffic_class == ATM_CBR) {
               printk("IA:  CBR not support\n");
               return -EINVAL;
          }
        }
	/* ia_vcc presumably hangs off vcc->dev_data -- the error path
	   below clears dev_data before freeing it. */
        ia_vcc =  INPH_IA_VCC(vcc);
        memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
        if (vcc->qos.txtp.max_sdu >
                         (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
           printk("IA:  SDU size over (%d) the configured SDU size %d\n",
		  vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
	   vcc->dev_data = NULL;
           kfree(ia_vcc);
           return -EINVAL;
        }
	ia_vcc->vc_desc_cnt = 0;
        ia_vcc->txing = 1;

        /* find pcr: fall back to the line rate when the caller gave no
           usable PCR, and clamp the result to the line rate. */
        if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
           vcc->qos.txtp.pcr = iadev->LineRate;
        else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
           vcc->qos.txtp.pcr = iadev->LineRate;
        else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
           vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
        if (vcc->qos.txtp.pcr > iadev->LineRate)
             vcc->qos.txtp.pcr = iadev->LineRate;
        ia_vcc->pcr = vcc->qos.txtp.pcr;

        /* Slower VCs get a longer flow-control timeout. */
        if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
        else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
        else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
        else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
        if (ia_vcc->pcr < iadev->rate_limit)
           skb_queue_head_init (&ia_vcc->txing_skb);
        if (ia_vcc->pcr < iadev->rate_limit) {
	   struct sock *sk = sk_atm(vcc);

	   /* Rate-limited VC: shrink the socket send buffer so the
	      flow-control backlog stays bounded. */
	   if (vcc->qos.txtp.max_sdu != 0) {
               if (ia_vcc->pcr > 60000)
                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
               else if (ia_vcc->pcr > 2000)
                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
               else
                 sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
           }
           else
             sk->sk_sndbuf = 24576;
        }

	/* Index the on-board main/extended VC tables by VCI. */
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	vc += vcc->vci;
	evc += vcc->vci;
	memset((caddr_t)vc, 0, sizeof(*vc));
	memset((caddr_t)evc, 0, sizeof(*evc));

	/* store the most significant 4 bits of vci as the last 4 bits
		of first part of atm header.
	   store the last 12 bits of vci as first 12 bits of the second
		part of the atm header.
	*/
	evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
	evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;

	/* check the following for different traffic classes */
	if (vcc->qos.txtp.traffic_class == ATM_UBR)
	{
		vc->type = UBR;
                vc->status = CRC_APPEND;
		vc->acr = cellrate_to_float(iadev->LineRate);
                if (vcc->qos.txtp.pcr > 0)
                   vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
                IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
                                             vcc->qos.txtp.max_pcr,vc->acr);)
	}
	else if (vcc->qos.txtp.traffic_class == ATM_ABR)
	{       srv_cls_param_t srv_p;
		IF_ABR(printk("Tx ABR VCC\n");)
                init_abr_vc(iadev, &srv_p);
                if (vcc->qos.txtp.pcr > 0)
                   srv_p.pcr = vcc->qos.txtp.pcr;
                if (vcc->qos.txtp.min_pcr > 0) {
                   int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
                   if (tmpsum > iadev->LineRate)
                       /* NOTE(review): unlike the max_sdu check above,
                          this return (and the CBR error returns below)
                          does not free ia_vcc or clear dev_data --
                          confirm the caller cleans up. */
                       return -EBUSY;
                   srv_p.mcr = vcc->qos.txtp.min_pcr;
                   iadev->sum_mcr += vcc->qos.txtp.min_pcr;
                }
                else srv_p.mcr = 0;
                if (vcc->qos.txtp.icr)
                   srv_p.icr = vcc->qos.txtp.icr;
                if (vcc->qos.txtp.tbe)
                   srv_p.tbe = vcc->qos.txtp.tbe;
                if (vcc->qos.txtp.frtt)
                   srv_p.frtt = vcc->qos.txtp.frtt;
                if (vcc->qos.txtp.rif)
                   srv_p.rif = vcc->qos.txtp.rif;
                if (vcc->qos.txtp.rdf)
                   srv_p.rdf = vcc->qos.txtp.rdf;
                if (vcc->qos.txtp.nrm_pres)
                   srv_p.nrm = vcc->qos.txtp.nrm;
                if (vcc->qos.txtp.trm_pres)
                   srv_p.trm = vcc->qos.txtp.trm;
                if (vcc->qos.txtp.adtf_pres)
                   srv_p.adtf = vcc->qos.txtp.adtf;
                if (vcc->qos.txtp.cdf_pres)
                   srv_p.cdf = vcc->qos.txtp.cdf;
                if (srv_p.icr > srv_p.pcr)
                   srv_p.icr = srv_p.pcr;
                IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n",
                                                      srv_p.pcr, srv_p.mcr);)
		ia_open_abr_vc(iadev, &srv_p, vcc, 1);
	} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
                if (iadev->phy_type & FE_25MBIT_PHY) {
                    printk("IA:  CBR not support\n");
                    return -EINVAL;
                }
                if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
                   IF_CBR(printk("PCR is not available\n");)
                   /* NOTE(review): -1 reaches callers as -EPERM;
                      presumably a proper -errno was intended --
                      confirm before changing. */
                   return -1;
                }
                vc->type = CBR;
                vc->status = CRC_APPEND;
                if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
                    return ret;
                }
	} else {
		printk("iadev:  Non UBR, ABR and CBR traffic not supported\n");
	}

        iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
	IF_EVENT(printk("ia open_tx returning \n");)
	return 0;
}
1900 
1901 
/*
 * tx_init() - one-time initialization of the segmentation (transmit) side.
 *
 * Allocates the TX DLE ring and per-descriptor CPCS trailer buffers,
 * lays out the transmit control memory (buffer descriptors, transmit
 * complete queue, packet ready queue, CBR/UBR/ABR schedule tables and
 * VC tables) and programs the matching segmentation registers.
 * Returns 0 on success or -ENOMEM, unwinding partial allocations
 * through the goto labels at the bottom.
 */
static int tx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;
	void *dle_addr;
	int i;
	u_short tcq_st_adr;
	u_short *tcq_start;
	u_short prq_st_adr;
	u_short *prq_start;
	struct main_vc *vc;
	struct ext_vc *evc;
        u_short tmp16;
        u32 vcsize_sel;

	iadev = INPH_IA_DEV(dev);
        spin_lock_init(&iadev->tx_lock);

	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
                                readw(iadev->seg_reg+SEG_MASK_REG));)

	/* Allocate 4k (boundary aligned) bytes */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr)  {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->tx_dle_q.start = (struct dle*)dle_addr;
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
	/* Mask all segmentation interrupts and reset the segmentation
	   engine before laying out its control memory. */
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
	writew(0, iadev->seg_reg+MODE_REG_0);
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
        iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
        iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
        iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

	/*
	   Transmit side control memory map
	   --------------------------------
	 Buffer descr 	0x0000 (128 - 4K)
	 Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
					(512 - 1K) each
					TCQ - 4K, PRQ - 5K
	 CBR Table 	0x1800 (as needed) - 6K
	 UBR Table	0x3000 (1K - 4K) - 12K
	 UBR Wait queue	0x4000 (1K - 4K) - 16K
	 ABR sched	0x5000	and ABR wait queue (1K - 2K) each
				ABR Tbl - 20K, ABR Wq - 22K
	 extended VC	0x6000 (1K - 8K) - 24K
	 VC Table	0x8000 (1K - 32K) - 32K

	Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
	and Wait q, which can be allotted later.
	*/

	/* Buffer Descriptor Table Base address */
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

	/* initialize each entry in the buffer descriptor table */
	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	tx_pkt_start = TX_PACKET_RAM;
	/* Descriptor 0 stays zeroed; descriptors 1..num_tx_desc each
	   point at a tx_buf_sz slice of packet RAM. */
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->desc_mode = AAL5;
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		tx_pkt_start += iadev->tx_buf_sz;
	}
	iadev->tx_buf = kmalloc_array(iadev->num_tx_desc,
				      sizeof(*iadev->tx_buf),
				      GFP_KERNEL);
        if (!iadev->tx_buf) {
            printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
	    goto err_free_dle;
        }
	/* One DMA-mapped CPCS (AAL5) trailer buffer per TX descriptor. */
       	for (i= 0; i< iadev->num_tx_desc; i++)
       	{
	    struct cpcs_trailer *cpcs;

       	    cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
            if(!cpcs) {
		printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
		goto err_free_tx_bufs;
            }
	    iadev->tx_buf[i].cpcs = cpcs;
	    /* NOTE(review): dma_map_single() result is not checked with
	       dma_mapping_error() -- confirm mapping cannot fail on the
	       platforms this driver supports. */
	    iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
						       cpcs,
						       sizeof(*cpcs),
						       DMA_TO_DEVICE);
        }
	iadev->desc_tbl = kmalloc_array(iadev->num_tx_desc,
					sizeof(*iadev->desc_tbl),
					GFP_KERNEL);
	if (!iadev->desc_tbl) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_all_tx_bufs;
	}

	/* Communication Queues base address */
        i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
        writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
                                              iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue */
        i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
                                              iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

        /* Load local copy of PRQ and TCQ ptrs */
        iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
 	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety initializing the queue to have desc 1 always */
	/* Fill the PRQ with all the free descriptors. */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0;	/* desc 1 in all entries */
		prq_start++;
	}
	/* CBR Table */
        IF_INIT(printk("Start CBR Init\n");)
#if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
        writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
        tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
        IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
        writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

        IF_INIT(printk("value in register = 0x%x\n",
                                   readw(iadev->seg_reg+CBR_PTR_BASE));)
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
        IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
                                        readw(iadev->seg_reg+CBR_TAB_BEG));)
        writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_END);
        IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
               iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
        IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
          readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
          readw(iadev->seg_reg+CBR_TAB_END+1));)

        /* Initialize the CBR Schedualing Table */
        memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
                                                          0, iadev->num_vc*6);
        iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
        iadev->CbrEntryPt = 0;
        iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
        iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- UBR Table size is 4K
		- UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	*/

	/* Encode num_vc as a size selector: 8K >> vcsize_sel == num_vc. */
        vcsize_sel = 0;
        i = 8*1024;
        while (i != iadev->num_vc) {
          i /= 2;
          vcsize_sel++;
        }

        i = MAIN_VC_TABLE * iadev->memSize;
        writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
        i =  EXT_VC_TABLE * iadev->memSize;
        writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
        i = UBR_SCHED_TABLE * iadev->memSize;
        writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
        i = UBR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
 	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
                                                       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
	/* initialize all bytes of ABR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- ABR Table size is 2K
		- ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	*/
        i = ABR_SCHED_TABLE * iadev->memSize;
        writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
        i = ABR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

        i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
	iadev->testTable = kmalloc_array(iadev->num_vc,
					 sizeof(*iadev->testTable),
					 GFP_KERNEL);
        if (!iadev->testTable) {
           printk("Get freepage  failed\n");
	   goto err_free_desc_tbl;
        }
	/* Zero every VC table entry and seed the per-VC test table. */
	for(i=0; i<iadev->num_vc; i++)
	{
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
                iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
						GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
              	iadev->testTable[i]->lastTime = 0;
 		iadev->testTable[i]->fract = 0;
                iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}

	/* Other Initialization */

	/* Max Rate Register */
        if (iadev->phy_type & FE_25MBIT_PHY) {
	   writew(RATE25, iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        }
        else {
	   writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        }
	/* Set Idle Header Reigisters to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
        writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

        iadev->close_pending = 0;
        init_waitqueue_head(&iadev->close_wait);
        init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
        skb_queue_head_init (&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
        writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
        iadev->tx_pkt_cnt = 0;
        iadev->rate_limit = iadev->LineRate / 3;

	return 0;

/* Error unwind: each label frees everything allocated before the
   failure point; err_free_all_tx_bufs sets i so the loop below frees
   every trailer buffer. */
err_free_test_tables:
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}
2224 
/*
 * ia_int() - shared PCI interrupt handler for the adapter.
 *
 * Loops while any of the low seven bits of the bus status register are
 * set, dispatching each source: reassembly (RX), segmentation (TX),
 * RX/TX DLE completion, and front-end (PHY) events.  The DLE bits are
 * write-one-to-clear here; the remaining bits are cleared by their
 * respective service routines.
 */
static irqreturn_t ia_int(int irq, void *dev_id)
{
   struct atm_dev *dev;
   IADEV *iadev;
   unsigned int status;
   int handled = 0;

   dev = dev_id;
   iadev = INPH_IA_DEV(dev);
   while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
   {
	handled = 1;
        IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
	if (status & STAT_REASSINT)
	{
	   /* Reassembly (receive) interrupt. */
	   IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
	   rx_intr(dev);
	}
	if (status & STAT_DLERINT)
	{
	   /* Clear this bit by writing a 1 to it. */
	   writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
	   rx_dle_intr(dev);
	}
	if (status & STAT_SEGINT)
	{
	   /* Segmentation (transmit) interrupt. */
           IF_EVENT(printk("IA: tx_intr \n");)
	   tx_intr(dev);
	}
	if (status & STAT_DLETINT)
	{
	   /* TX DLE done -- also write-one-to-clear. */
	   writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
	   tx_dle_intr(dev);
	}
	if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
	{
	   /* Only the front-end (PHY) event gets handled; ERRINT and
	      MARKINT are ignored here. */
           if (status & STAT_FEINT)
               ia_frontend_intr(iadev);
	}
   }
   return IRQ_RETVAL(handled);
}
2269 
2270 
2271 
2272 /*----------------------------- entries --------------------------------*/
/*
 * get_esi() - read the adapter's ESI (MAC address) from the MAC1/MAC2
 * registers and unpack it, most-significant byte first, into dev->esi[].
 * Always returns 0.
 */
static int get_esi(struct atm_dev *dev)
{
	IADEV *iadev;
	int i;
	u32 mac1;
	u16 mac2;

	iadev = INPH_IA_DEV(dev);
	/* NOTE(review): cpu_to_be32(le32_to_cpu(...)) applied to a
	   readl() result amounts to an unconditional byte swap;
	   presumably the hardware registers hold the ESI little-endian
	   and network byte order is wanted -- confirm against the
	   (i)Chip documentation.  Also note mac2 is a u16, so the
	   upper 16 bits of the MAC2 readl() are discarded. */
	mac1 = cpu_to_be32(le32_to_cpu(readl(
				iadev->reg+IPHASE5575_MAC1)));
	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
	for (i=0; i<MAC1_LEN; i++)
		dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));

	for (i=0; i<MAC2_LEN; i++)
		dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
	return 0;
}
2292 
2293 static int reset_sar(struct atm_dev *dev)
2294 {
2295 	IADEV *iadev;
2296 	int i, error = 1;
2297 	unsigned int pci[64];
2298 
2299 	iadev = INPH_IA_DEV(dev);
2300 	for(i=0; i<64; i++)
2301 	  if ((error = pci_read_config_dword(iadev->pci,
2302 				i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2303   	      return error;
2304 	writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2305 	for(i=0; i<64; i++)
2306 	  if ((error = pci_write_config_dword(iadev->pci,
2307 					i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
2308 	    return error;
2309 	udelay(5);
2310 	return 0;
2311 }
2312 
2313 
/*
 * ia_init() - probe-time initialization of one adapter.
 *
 * Reads PCI configuration, sizes the board from its BAR 0 length
 * (1 MB mapping => 4K VCs / 4 memSize units, 256 KB => 1K VCs / 1),
 * ioremaps the register/RAM window, fills in the iadev register and
 * RAM pointers, reads the ESI and resets the SAR.  Returns 0 on
 * success or a negative errno (see the NOTE on the SAR-reset path).
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
				dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
			dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

	/* BAR size encodes the board variant. */
        if (iadev->pci_map_size == 0x100000){
          iadev->num_vc = 4096;
	  dev->ci_range.vci_bits = NR_VCI_4K_LD;
          iadev->memSize = 4;
        }
        else if (iadev->pci_map_size == 0x40000) {
          iadev->num_vc = 1024;
          iadev->memSize = 1;
        }
        else {
           printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
           return -EINVAL;
        }
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
			    dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
			dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAm and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
          iadev->reg,iadev->seg_reg,iadev->reass_reg,
          iadev->phy, iadev->ram, iadev->seg_ram,
          iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
	  iounmap(iadev->base);
	  return error;
	}
        printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
                printk("%s%02X",i ? "-" : "",dev->esi[i]);
        printk("\n");

        /* reset SAR */
        if (reset_sar(dev)) {
	   iounmap(iadev->base);
           printk("IA: reset SAR fail, please try again\n");
           /* NOTE(review): returns 1 rather than a -errno here;
              callers presumably only test for nonzero -- confirm
              before normalizing. */
           return 1;
        }
	return 0;
}
2426 
2427 static void ia_update_stats(IADEV *iadev) {
2428     if (!iadev->carrier_detect)
2429         return;
2430     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2431     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2432     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2433     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2434     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2435     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2436     return;
2437 }
2438 
2439 static void ia_led_timer(struct timer_list *unused) {
2440  	unsigned long flags;
2441   	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2442         u_char i;
2443         static u32 ctrl_reg;
2444         for (i = 0; i < iadev_count; i++) {
2445            if (ia_dev[i]) {
2446 	      ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2447 	      if (blinking[i] == 0) {
2448 		 blinking[i]++;
2449                  ctrl_reg &= (~CTRL_LED);
2450                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2451                  ia_update_stats(ia_dev[i]);
2452               }
2453               else {
2454 		 blinking[i] = 0;
2455 		 ctrl_reg |= CTRL_LED;
2456                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2457                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2458                  if (ia_dev[i]->close_pending)
2459                     wake_up(&ia_dev[i]->close_wait);
2460                  ia_tx_poll(ia_dev[i]);
2461                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2462               }
2463            }
2464         }
2465 	mod_timer(&ia_timer, jiffies + HZ / 4);
2466  	return;
2467 }
2468 
2469 static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2470 	unsigned long addr)
2471 {
2472 	writel(value, INPH_IA_DEV(dev)->phy+addr);
2473 }
2474 
2475 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2476 {
2477 	return readl(INPH_IA_DEV(dev)->phy+addr);
2478 }
2479 
2480 static void ia_free_tx(IADEV *iadev)
2481 {
2482 	int i;
2483 
2484 	kfree(iadev->desc_tbl);
2485 	for (i = 0; i < iadev->num_vc; i++)
2486 		kfree(iadev->testTable[i]);
2487 	kfree(iadev->testTable);
2488 	for (i = 0; i < iadev->num_tx_desc; i++) {
2489 		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2490 
2491 		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2492 				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2493 		kfree(desc->cpcs);
2494 	}
2495 	kfree(iadev->tx_buf);
2496 	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2497 			  iadev->tx_dle_dma);
2498 }
2499 
2500 static void ia_free_rx(IADEV *iadev)
2501 {
2502 	kfree(iadev->rx_open);
2503 	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2504 			  iadev->rx_dle_dma);
2505 }
2506 
/*
 * ia_start() - bring the adapter online.
 *
 * Requests the (shared) IRQ, enables PCI memory + bus mastering,
 * programs the bus control register, runs the TX and RX side
 * initializers, takes the front end out of reset and initializes
 * whichever PHY variant the board carries.  Returns 0 on success or a
 * negative errno, unwinding through the goto labels at the bottom.
 */
static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
        if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
                printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
                    dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
        }
        /* (Failures below release the IRQ via the err_free_irq label.) */
	/* enabling memory + master */
        if ((error = pci_write_config_word(iadev->pci,
				PCI_COMMAND,
				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
	{
                printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
                    "master (0x%x)\n",dev->number, error);
		error = -EIO;
		goto err_free_irq;
        }
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
		Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	/* Preserve only LED and front-end-reset state; set burst sizes
	   and unmask the interrupt sources we service. */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
			| CTRL_B8
			| CTRL_B16
			| CTRL_B32
			| CTRL_B48
			| CTRL_B64
			| CTRL_B128
			| CTRL_ERRMASK
			| CTRL_DLETMASK		/* shud be removed l8r */
			| CTRL_DLERMASK
			| CTRL_SEGMASK
			| CTRL_REASSMASK
			| CTRL_FEMASK
			| CTRL_CSPREEMPT;

       writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                           readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
	   printk("Bus status reg after init: %08x\n",
                            readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

        ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	/* Release the front-end reset now that both sides are set up. */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
       	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                               readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
        phy = 0; /* resolve compiler complaint */
        IF_INIT (
	if ((phy=ia_phy_get(dev,0)) == 0x30)
		printk("IA: pm5346,rev.%d\n",phy&0x0f);
	else
		printk("IA: utopia,rev.%0x\n",phy);)

	/* PHY-specific initialization. */
	if (iadev->phy_type &  FE_25MBIT_PHY)
           ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
           ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
2608 
/*
 * ia_close - device-dependent part of closing a VCC.
 *
 * For a transmitting VCC: waits briefly for pending transmission to
 * settle, purges this VCC's packets from the device tx backlog (other
 * VCCs' packets are preserved via a temporary queue), waits - bounded
 * by a pcr-derived timeout - for outstanding tx descriptors to drain,
 * and returns any ABR/CBR bandwidth accounted in sum_mcr.  For a
 * receiving VCC: invalidates the reassembly and VC table entries,
 * drains pending rx DLEs and clears the rx_open slot.  Finally frees
 * the per-VCC state and clears ATM_VF_ADDR.
 */
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
        u16 *vc_table;
        IADEV *iadev;
        struct ia_vcc *ia_vcc;
        struct sk_buff *skb = NULL;
        struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
        unsigned long closetime, flags;

        iadev = INPH_IA_DEV(vcc->dev);
        ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;

        IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
                                              ia_vcc->vc_desc_cnt,vcc->vci);)
	clear_bit(ATM_VF_READY,&vcc->flags);
        skb_queue_head_init (&tmp_tx_backlog);
        skb_queue_head_init (&tmp_vcc_backlog);
        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
           iadev->close_pending++;
	   /* give in-flight transmission ~500 ms before purging the backlog */
	   prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
	   schedule_timeout(msecs_to_jiffies(500));
	   finish_wait(&iadev->timeout_wait, &wait);
           spin_lock_irqsave(&iadev->tx_lock, flags);
	   /* drop this VCC's queued skbs; park other VCCs' skbs on a
	      temporary queue, then restore them in their original order */
           while((skb = skb_dequeue(&iadev->tx_backlog))) {
              if (ATM_SKB(skb)->vcc == vcc){
                 if (vcc->pop) vcc->pop(vcc, skb);
                 else dev_kfree_skb_any(skb);
              }
              else
                 skb_queue_tail(&tmp_tx_backlog, skb);
           }
           while((skb = skb_dequeue(&tmp_tx_backlog)))
             skb_queue_tail(&iadev->tx_backlog, skb);
           IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
	   /* drain timeout scales inversely with the VC's cell rate;
	      never wait zero ticks */
           closetime = 300000 / ia_vcc->pcr;
           if (closetime == 0)
              closetime = 1;
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
	   /* wait for all of this VC's outstanding tx descriptors */
           wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
           spin_lock_irqsave(&iadev->tx_lock, flags);
           iadev->close_pending--;
	   /* reset the shaping bookkeeping for this VCI back to UBR */
           iadev->testTable[vcc->vci]->lastTime = 0;
           iadev->testTable[vcc->vci]->fract = 0;
           iadev->testTable[vcc->vci]->vc_status = VC_UBR;
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
              if (vcc->qos.txtp.min_pcr > 0)
                 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
           }
           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
              ia_vcc = INPH_IA_VCC(vcc);
              iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
              ia_cbrVc_close (vcc);
           }
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
        }

        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
           // reset reass table
           vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
           vc_table += vcc->vci;
           *vc_table = NO_AAL5_PKT;
           // reset vc table
           vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
           vc_table += vcc->vci;
           *vc_table = (vcc->vci << 6) | 15;
           if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
              struct abr_vc_table __iomem *abr_vc_table =
                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
              abr_vc_table +=  vcc->vci;
              abr_vc_table->rdf = 0x0003;
              abr_vc_table->air = 0x5eb1;
           }
           // Drain the packets
           rx_dle_intr(vcc->dev);
           iadev->rx_open[vcc->vci] = NULL;
        }
	kfree(INPH_IA_VCC(vcc));
        ia_vcc = NULL;
        vcc->dev_data = NULL;
        clear_bit(ATM_VF_ADDR,&vcc->flags);
        return;
}
2693 
2694 static int ia_open(struct atm_vcc *vcc)
2695 {
2696 	struct ia_vcc *ia_vcc;
2697 	int error;
2698 	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2699 	{
2700 		IF_EVENT(printk("ia: not partially allocated resources\n");)
2701 		vcc->dev_data = NULL;
2702 	}
2703 	if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2704 	{
2705 		IF_EVENT(printk("iphase open: unspec part\n");)
2706 		set_bit(ATM_VF_ADDR,&vcc->flags);
2707 	}
2708 	if (vcc->qos.aal != ATM_AAL5)
2709 		return -EINVAL;
2710 	IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2711                                  vcc->dev->number, vcc->vpi, vcc->vci);)
2712 
2713 	/* Device dependent initialization */
2714 	ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2715 	if (!ia_vcc) return -ENOMEM;
2716 	vcc->dev_data = ia_vcc;
2717 
2718 	if ((error = open_rx(vcc)))
2719 	{
2720 		IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2721 		ia_close(vcc);
2722 		return error;
2723 	}
2724 
2725 	if ((error = open_tx(vcc)))
2726 	{
2727 		IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2728 		ia_close(vcc);
2729 		return error;
2730 	}
2731 
2732 	set_bit(ATM_VF_READY,&vcc->flags);
2733 
2734 #if 0
2735         {
2736            static u8 first = 1;
2737            if (first) {
2738               ia_timer.expires = jiffies + 3*HZ;
2739               add_timer(&ia_timer);
2740               first = 0;
2741            }
2742         }
2743 #endif
2744 	IF_EVENT(printk("ia open returning\n");)
2745 	return 0;
2746 }
2747 
/*
 * ia_change_qos - atmdev_ops change_qos hook.
 *
 * QoS renegotiation is not implemented: the request is accepted and
 * ignored (returns 0 without touching the hardware).
 */
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}
2753 
/*
 * ia_ioctl - driver-private ioctl handler (IA_CMD), with pass-through
 * of all other commands to the attached PHY driver.
 *
 * The IA_CMD sub-commands are debugging aids: segmentation and
 * reassembly register dumps, descriptor/statistics printks,
 * re-enabling the receive path (0x9), triggering a front-end
 * interrupt (0xb) and setting the IADebugFlag mask (0xa).  Most
 * sub-commands require CAP_NET_ADMIN.
 *
 * NOTE(review): the ia_cmds.status/len fields updated below are never
 * copied back to userspace, and dev->phy is dereferenced without a
 * NULL check for non-IA_CMD commands -- confirm a PHY is always
 * attached by the time userspace can issue ioctls.
 */
static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
{
   IA_CMDBUF ia_cmds;
   IADEV *iadev;
   int i, board;
   u16 __user *tmps;
   IF_EVENT(printk(">ia_ioctl\n");)
   if (cmd != IA_CMD) {
      if (!dev->phy->ioctl) return -EINVAL;
      return dev->phy->ioctl(dev,cmd,arg);
   }
   if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
   board = ia_cmds.status;

	/* clamp the user-supplied board index and use array_index_nospec()
	   so speculation cannot run past the bounds check (Spectre v1) */
	if ((board < 0) || (board > iadev_count))
		board = 0;
	board = array_index_nospec(board, iadev_count + 1);

   iadev = ia_dev[board];
   switch (ia_cmds.cmd) {
   case MEMDUMP:
   {
	switch (ia_cmds.sub_cmd) {
          case MEMDUMP_SEGREG:
	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
             tmps = (u16 __user *)ia_cmds.buf;
             for(i=0; i<0x80; i+=2, tmps++)
                if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
             ia_cmds.status = 0;
             ia_cmds.len = 0x80;
             break;
          case MEMDUMP_REASSREG:
	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
             tmps = (u16 __user *)ia_cmds.buf;
             for(i=0; i<0x80; i+=2, tmps++)
                if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
             ia_cmds.status = 0;
             ia_cmds.len = 0x80;
             break;
          case MEMDUMP_FFL:
          {
             ia_regs_t       *regs_local;
             ffredn_t        *ffL;
             rfredn_t        *rfL;

	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
	     /* snapshot the registers into a kernel copy first, then do a
	        single copy_to_user of the whole structure */
	     regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
	     if (!regs_local) return -ENOMEM;
	     ffL = &regs_local->ffredn;
	     rfL = &regs_local->rfredn;
             /* Copy real rfred registers into the local copy */
 	     for (i=0; i<(sizeof (rfredn_t))/4; i++)
                ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
             	/* Copy real ffred registers into the local copy */
	     for (i=0; i<(sizeof (ffredn_t))/4; i++)
                ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;

             if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
                kfree(regs_local);
                return -EFAULT;
             }
             kfree(regs_local);
             printk("Board %d registers dumped\n", board);
             ia_cmds.status = 0;
	 }
    	     break;
         case READ_REG:
         {
	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
             desc_dbg(iadev);
             ia_cmds.status = 0;
         }
             break;
         case 0x6:
         {
             ia_cmds.status = 0;
             printk("skb = 0x%p\n", skb_peek(&iadev->tx_backlog));
             printk("rtn_q: 0x%p\n",ia_deque_rtn_q(&iadev->tx_return_q));
         }
             break;
         case 0x8:
         {
             struct k_sonet_stats *stats;
             stats = &PRIV(_ia_dev[board])->sonet_stats;
             printk("section_bip: %d\n", atomic_read(&stats->section_bip));
             printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
             printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
             printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
             printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
             printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
             printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
             printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
             printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
         }
            ia_cmds.status = 0;
            break;
         case 0x9:
	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
	    /* re-arm reception: refill all rx descriptors and unmask the
	       free-queue-empty / exception interrupts */
            for (i = 1; i <= iadev->num_rx_desc; i++)
               free_desc(_ia_dev[board], i);
            writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
                                            iadev->reass_reg+REASS_MASK_REG);
            iadev->rxing = 1;

            ia_cmds.status = 0;
            break;

         case 0xb:
	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
            ia_frontend_intr(iadev);
            break;
         case 0xa:
	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
         {
             ia_cmds.status = 0;
             IADebugFlag = ia_cmds.maddr;
             printk("New debug option loaded\n");
         }
             break;
         default:
             ia_cmds.status = 0;
             break;
      }
   }
      break;
   default:
      break;

   }
   return 0;
}
2885 
/*
 * ia_pkt_tx - hand one AAL5 packet to the segmentation hardware.
 *
 * Called from ia_send() with iadev->tx_lock held.  Validates the skb
 * (VC still transmitting, size within the tx buffer, 4-byte aligned
 * data), obtains a free tx descriptor from the TCQ, posts the
 * descriptor number to the packet ready queue, fills in the CPCS
 * trailer and the buffer descriptor, and builds two DMA list entries:
 * one for the skb data and one for the trailer.
 *
 * Returns 0 when the packet was consumed (queued to hardware or
 * dropped with stats/pop handled here) and 1 when no descriptor is
 * available, in which case the caller requeues the skb on the
 * backlog.
 */
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
        IADEV *iadev;
        struct dle *wr_ptr;
        struct tx_buf_desc __iomem *buf_desc_ptr;
        int desc;
        int comp_code;
        int total_len;
        struct cpcs_trailer *trailer;
        struct ia_vcc *iavcc;

        iadev = INPH_IA_DEV(vcc->dev);
        iavcc = INPH_IA_VCC(vcc);
        if (!iavcc->txing) {
           printk("discard packet on closed VC\n");
           if (vcc->pop)
		vcc->pop(vcc, skb);
           else
		dev_kfree_skb_any(skb);
	   return 0;
        }

        if (skb->len > iadev->tx_buf_sz - 8) {
           printk("Transmit size over tx buffer size\n");
           if (vcc->pop)
                 vcc->pop(vcc, skb);
           else
                 dev_kfree_skb_any(skb);
          return 0;
        }
        if ((unsigned long)skb->data & 3) {
           printk("Misaligned SKB\n");
           if (vcc->pop)
                 vcc->pop(vcc, skb);
           else
                 dev_kfree_skb_any(skb);
           return 0;
        }
	/* Get a descriptor number from our free descriptor queue
	   We get the descr number from the TCQ now, since I am using
	   the TCQ as a free buffer queue. Initially TCQ will be
	   initialized with all the descriptors and is hence, full.
	*/
	desc = get_desc (iadev, iavcc);
	if (desc == 0xffff)
	    return 1;
	/* top 3 bits carry a completion code, low 13 bits the descriptor */
	comp_code = desc >> 13;
	desc &= 0x1fff;

	if ((desc == 0) || (desc > iadev->num_tx_desc))
	{
		IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
                atomic_inc(&vcc->stats->tx);
		if (vcc->pop)
		    vcc->pop(vcc, skb);
		else
		    dev_kfree_skb_any(skb);
		return 0;   /* return SUCCESS */
	}

	if (comp_code)
	{
	    IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
                                                            desc, comp_code);)
	}

        /* remember the desc and vcc mapping */
        iavcc->vc_desc_cnt++;
        iadev->desc_tbl[desc-1].iavcc = iavcc;
        iadev->desc_tbl[desc-1].txskb = skb;
        IA_SKB_STATE(skb) = 0;

	/* advance the TCQ read pointer (wrapping at tcq_ed) and tell the
	   hardware where we are */
        iadev->ffL.tcq_rd += 2;
        if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
	  	iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
	writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);

	/* Put the descriptor number in the packet ready queue
		and put the updated write pointer in the DLE field
	*/
	*(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;

 	iadev->ffL.prq_wr += 2;
        if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
                iadev->ffL.prq_wr = iadev->ffL.prq_st;

	/* Figure out the exact length of the packet and padding required to
           make it  aligned on a 48 byte boundary.  */
	total_len = skb->len + sizeof(struct cpcs_trailer);
	total_len = ((total_len + 47) / 48) * 48;
	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)

	/* Put the packet in a tx buffer */
	trailer = iadev->tx_buf[desc-1].cpcs;
        IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
                  skb, skb->data, skb->len, desc);)
	trailer->control = 0;
        /*big endian*/
	trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
	trailer->crc32 = 0;	/* not needed - dummy bytes */

	/* Display the packet */
	IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
                                                        skb->len, tcnter++);
        xdump(skb->data, skb->len, "TX: ");
        printk("\n");)

	/* Build the buffer descriptor */
	buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
	buf_desc_ptr += desc;	/* points to the corresponding entry */
	buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
	/* Huh ? p.115 of users guide describes this as a read-only register */
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	buf_desc_ptr->vc_index = vcc->vci;
	buf_desc_ptr->bytes = total_len;

        if (vcc->qos.txtp.traffic_class == ATM_ABR)
	   clear_lockup (vcc, iadev);

	/* Build the DLE structure */
	wr_ptr = iadev->tx_dle_q.write;
	memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      skb->len, DMA_TO_DEVICE);
	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
                                                  buf_desc_ptr->buf_start_lo;
	/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
	wr_ptr->bytes = skb->len;

        /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
        if ((wr_ptr->bytes >> 2) == 0xb)
           wr_ptr->bytes = 0x30;

	wr_ptr->mode = TX_DLE_PSI;
	wr_ptr->prq_wr_ptr_data = 0;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

        /* Build trailer dle */
        wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
        wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
          buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);

        wr_ptr->bytes = sizeof(struct cpcs_trailer);
        wr_ptr->mode = DMA_INT_ENABLE;
        wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;

        /* end is not to be used for the DLE q */
        if (++wr_ptr == iadev->tx_dle_q.end)
                wr_ptr = iadev->tx_dle_q.start;

	iadev->tx_dle_q.write = wr_ptr;
        ATM_DESC(skb) = vcc->vci;
        skb_queue_tail(&iadev->tx_dma_q, skb);

        atomic_inc(&vcc->stats->tx);
        iadev->tx_pkt_cnt++;
	/* Increment transaction counter */
	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);

#if 0
        /* add flow control logic */
        if (atomic_read(&vcc->stats->tx) % 20 == 0) {
          if (iavcc->vc_desc_cnt > 10) {
             vcc->tx_quota =  vcc->tx_quota * 3 / 4;
            printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
              iavcc->flow_inc = -1;
              iavcc->saved_tx_quota = vcc->tx_quota;
           } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
             // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
             printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
              iavcc->flow_inc = 0;
           }
        }
#endif
	IF_TX(printk("ia send done\n");)
	return 0;
}
3065 
3066 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3067 {
3068         IADEV *iadev;
3069         unsigned long flags;
3070 
3071         iadev = INPH_IA_DEV(vcc->dev);
3072         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3073         {
3074             if (!skb)
3075                 printk(KERN_CRIT "null skb in ia_send\n");
3076             else dev_kfree_skb_any(skb);
3077             return -EINVAL;
3078         }
3079         spin_lock_irqsave(&iadev->tx_lock, flags);
3080         if (!test_bit(ATM_VF_READY,&vcc->flags)){
3081             dev_kfree_skb_any(skb);
3082             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3083             return -EINVAL;
3084         }
3085         ATM_SKB(skb)->vcc = vcc;
3086 
3087         if (skb_peek(&iadev->tx_backlog)) {
3088            skb_queue_tail(&iadev->tx_backlog, skb);
3089         }
3090         else {
3091            if (ia_pkt_tx (vcc, skb)) {
3092               skb_queue_tail(&iadev->tx_backlog, skb);
3093            }
3094         }
3095         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3096         return 0;
3097 
3098 }
3099 
3100 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3101 {
3102   int   left = *pos, n;
3103   char  *tmpPtr;
3104   IADEV *iadev = INPH_IA_DEV(dev);
3105   if(!left--) {
3106      if (iadev->phy_type == FE_25MBIT_PHY) {
3107        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3108        return n;
3109      }
3110      if (iadev->phy_type == FE_DS3_PHY)
3111         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3112      else if (iadev->phy_type == FE_E3_PHY)
3113         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3114      else if (iadev->phy_type == FE_UTP_OPTION)
3115          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155");
3116      else
3117         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3118      tmpPtr = page + n;
3119      if (iadev->pci_map_size == 0x40000)
3120         n += sprintf(tmpPtr, "-1KVC-");
3121      else
3122         n += sprintf(tmpPtr, "-4KVC-");
3123      tmpPtr = page + n;
3124      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3125         n += sprintf(tmpPtr, "1M  \n");
3126      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3127         n += sprintf(tmpPtr, "512K\n");
3128      else
3129        n += sprintf(tmpPtr, "128K\n");
3130      return n;
3131   }
3132   if (!left) {
3133      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3134                            "  Size of Tx Buffer  :  %u\n"
3135                            "  Number of Rx Buffer:  %u\n"
3136                            "  Size of Rx Buffer  :  %u\n"
3137                            "  Packets Received   :  %u\n"
3138                            "  Packets Transmitted:  %u\n"
3139                            "  Cells Received     :  %u\n"
3140                            "  Cells Transmitted  :  %u\n"
3141                            "  Board Dropped Cells:  %u\n"
3142                            "  Board Dropped Pkts :  %u\n",
3143                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3144                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3145                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3146                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3147                            iadev->drop_rxcell, iadev->drop_rxpkt);
3148   }
3149   return 0;
3150 }
3151 
/* atm_dev operations this driver exports to the ATM core. */
static const struct atmdev_ops ops = {
	.open		= ia_open,
	.close		= ia_close,
	.ioctl		= ia_ioctl,
	.send		= ia_send,
	.phy_put	= ia_phy_put,
	.phy_get	= ia_phy_get,
	.change_qos	= ia_change_qos,
	.proc_read	= ia_proc_read,
	.owner		= THIS_MODULE,
};
3163 
/*
 * ia_init_one - PCI probe callback.
 *
 * Allocates the per-board IADEV, enables the PCI device, registers
 * the ATM device with the core, publishes the board in the global
 * ia_dev/_ia_dev tables and brings the hardware up via
 * ia_init()/ia_start().  On failure everything is rolled back in
 * reverse order via the labels at the bottom.
 *
 * NOTE(review): iadev_count and the global tables are updated without
 * explicit locking -- presumably relies on PCI probe serialization;
 * confirm.
 */
static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	IADEV *iadev;
	int ret;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev) {
		ret = -ENOMEM;
		goto err_out;
	}

	iadev->pci = pdev;

	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
		pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
	if (pci_enable_device(pdev)) {
		ret = -ENODEV;
		goto err_out_free_iadev;
	}
	dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_out_disable_dev;
	}
	dev->dev_data = iadev;
	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
	IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
		iadev->LineRate);)

	pci_set_drvdata(pdev, dev);

	/* publish the board before bringing it up; undone on failure */
	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
	if (ia_init(dev) || ia_start(dev)) {
		IF_INIT(printk("IA register failed!\n");)
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
		ret = -EINVAL;
		goto err_out_deregister_dev;
	}
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	/* prepend to the driver's global board list */
	iadev->next_board = ia_boards;
	ia_boards = dev;

	return 0;

err_out_deregister_dev:
	atm_dev_deregister(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out_free_iadev:
	kfree(iadev);
err_out:
	return ret;
}
3223 
/*
 * ia_remove_one - PCI remove callback.
 *
 * Quiesces the PHY (masks the loss-of-signal interrupt and stops the
 * PHY driver), frees the IRQ, drops the board from the global tables,
 * deregisters the ATM device and finally releases MMIO mappings and
 * the tx/rx resources before freeing the IADEV itself.
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
				   SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
      	free_irq(iadev->irq, dev);
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

      	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

      	kfree(iadev);
}
3253 
/* PCI IDs of the supported Interphase (i)Chip ATM adapters. */
static const struct pci_device_id ia_pci_tbl[] = {
	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3260 
/* PCI driver glue: probe/remove entry points for the ID table above. */
static struct pci_driver ia_driver = {
	.name =         DEV_LABEL,
	.id_table =     ia_pci_tbl,
	.probe =        ia_init_one,
	.remove =       ia_remove_one,
};
3267 
3268 static int __init ia_module_init(void)
3269 {
3270 	int ret;
3271 
3272 	ret = pci_register_driver(&ia_driver);
3273 	if (ret >= 0) {
3274 		ia_timer.expires = jiffies + 3*HZ;
3275 		add_timer(&ia_timer);
3276 	} else
3277 		printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3278 	return ret;
3279 }
3280 
static void __exit ia_module_exit(void)
{
	/* Unregister first so probe can no longer run, then synchronously
	   kill the housekeeping timer armed in ia_module_init(). */
	pci_unregister_driver(&ia_driver);

	del_timer_sync(&ia_timer);
}
3287 
/* Standard module entry/exit hooks. */
module_init(ia_module_init);
module_exit(ia_module_exit);
3290