xref: /openbmc/linux/drivers/atm/iphase.c (revision 034f90b3)
1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards
3                     Author: Peter Wang  <pwang@iphase.com>
4 		   Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>
6                                Version: 1.0
7 *******************************************************************************
8 
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13 
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18 
19       Modified from an incomplete driver for Interphase 5575 1KVC 1M card which
20       was originally written by Monalisa Agrawal at UNH. Now this driver
21       supports a variety of variants of Interphase ATM PCI (i)Chip adapter
22       card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM)
23       in terms of PHY type, the size of control memory and the size of
24       packet memory. The following is the change log and history:
25 
26           Bugfix the Mona's UBR driver.
27           Modify the basic memory allocation and dma logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and added the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32 	  Add the flow control logic to the driver to allow rate-limit VC.
33           Add 4K VC support to the board with 512K control memory.
34           Add the support of all the variants of the Interphase ATM PCI
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38 
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40 
41 *******************************************************************************/
42 
43 #include <linux/module.h>
44 #include <linux/kernel.h>
45 #include <linux/mm.h>
46 #include <linux/pci.h>
47 #include <linux/errno.h>
48 #include <linux/atm.h>
49 #include <linux/atmdev.h>
50 #include <linux/sonet.h>
51 #include <linux/skbuff.h>
52 #include <linux/time.h>
53 #include <linux/delay.h>
54 #include <linux/uio.h>
55 #include <linux/init.h>
56 #include <linux/interrupt.h>
57 #include <linux/wait.h>
58 #include <linux/slab.h>
59 #include <asm/io.h>
60 #include <linux/atomic.h>
61 #include <asm/uaccess.h>
62 #include <asm/string.h>
63 #include <asm/byteorder.h>
64 #include <linux/vmalloc.h>
65 #include <linux/jiffies.h>
66 #include "iphase.h"
67 #include "suni.h"
68 #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8))
69 
70 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
71 
72 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
73 static void desc_dbg(IADEV *iadev);
74 
75 static IADEV *ia_dev[8];
76 static struct atm_dev *_ia_dev[8];
77 static int iadev_count;
78 static void ia_led_timer(unsigned long arg);
79 static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
80 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
81 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
82 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
83             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
84 
85 module_param(IA_TX_BUF, int, 0);
86 module_param(IA_TX_BUF_SZ, int, 0);
87 module_param(IA_RX_BUF, int, 0);
88 module_param(IA_RX_BUF_SZ, int, 0);
89 module_param(IADebugFlag, uint, 0644);
90 
91 MODULE_LICENSE("GPL");
92 
93 /**************************** IA_LIB **********************************/
94 
95 static void ia_init_rtn_q (IARTN_Q *que)
96 {
97    que->next = NULL;
98    que->tail = NULL;
99 }
100 
101 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
102 {
103    data->next = NULL;
104    if (que->next == NULL)
105       que->next = que->tail = data;
106    else {
107       data->next = que->next;
108       que->next = data;
109    }
110    return;
111 }
112 
113 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
114    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
115    if (!entry) return -1;
116    entry->data = data;
117    entry->next = NULL;
118    if (que->next == NULL)
119       que->next = que->tail = entry;
120    else {
121       que->tail->next = entry;
122       que->tail = que->tail->next;
123    }
124    return 1;
125 }
126 
127 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
128    IARTN_Q *tmpdata;
129    if (que->next == NULL)
130       return NULL;
131    tmpdata = que->next;
132    if ( que->next == que->tail)
133       que->next = que->tail = NULL;
134    else
135       que->next = que->next->next;
136    return tmpdata;
137 }
138 
139 static void ia_hack_tcq(IADEV *dev) {
140 
141   u_short 		desc1;
142   u_short		tcq_wr;
143   struct ia_vcc         *iavcc_r = NULL;
144 
145   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
146   while (dev->host_tcq_wr != tcq_wr) {
147      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
148      if (!desc1) ;
149      else if (!dev->desc_tbl[desc1 -1].timestamp) {
150         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
151         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
152      }
153      else if (dev->desc_tbl[desc1 -1].timestamp) {
154         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
155            printk("IA: Fatal err in get_desc\n");
156            continue;
157         }
158         iavcc_r->vc_desc_cnt--;
159         dev->desc_tbl[desc1 -1].timestamp = 0;
160         IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n",
161                                    dev->desc_tbl[desc1 -1].txskb, desc1);)
162         if (iavcc_r->pcr < dev->rate_limit) {
163            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
164            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
165               printk("ia_hack_tcq: No memory available\n");
166         }
167         dev->desc_tbl[desc1 -1].iavcc = NULL;
168         dev->desc_tbl[desc1 -1].txskb = NULL;
169      }
170      dev->host_tcq_wr += 2;
171      if (dev->host_tcq_wr > dev->ffL.tcq_ed)
172         dev->host_tcq_wr = dev->ffL.tcq_st;
173   }
174 } /* ia_hack_tcq */
175 
/*
 * get_desc() - claim the next free transmit descriptor number.
 *
 * Runs ia_hack_tcq() to reclaim completed descriptors first.  Roughly
 * every 50 jiffies -- or whenever the TCQ looks empty -- it also scans
 * the descriptor table and recycles any entry whose timestamp is older
 * than its VC's ltimeout (recovery for descriptors the hardware
 * apparently never completed), pushing the number back onto the TCQ.
 *
 * Returns a 1-based descriptor number, or 0xFFFF when none is free.
 */
static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
  u_short 		desc_num, i;
  struct sk_buff        *skb;
  struct ia_vcc         *iavcc_r = NULL;
  unsigned long delta;
  /* NOTE(review): 'timer' is function-static and therefore shared by
   * all adapters -- the recovery-scan throttle is global, not per
   * IADEV.  Confirm this is intentional. */
  static unsigned long timer = 0;
  int ltimeout;

  ia_hack_tcq (dev);
  if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) {
     timer = jiffies;
     i=0;
     while (i < dev->num_tx_desc) {
        if (!dev->desc_tbl[i].timestamp) {
           i++;
           continue;
        }
        ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
        delta = jiffies - dev->desc_tbl[i].timestamp;
        if (delta >= ltimeout) {
           IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
           /* Rewind the TCQ read pointer (with wrap) and push this
            * descriptor number back in as a free entry. */
           if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
              dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
           else
              dev->ffL.tcq_rd -= 2;
           *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
           /* Drop our bookkeeping for the stale descriptor; the skb is
            * presumably released later via the return queue -- TODO
            * confirm against ia_tx_poll(). */
           if (!(skb = dev->desc_tbl[i].txskb) ||
                          !(iavcc_r = dev->desc_tbl[i].iavcc))
              printk("Fatal err, desc table vcc or skb is NULL\n");
           else
              iavcc_r->vc_desc_cnt--;
           dev->desc_tbl[i].timestamp = 0;
           dev->desc_tbl[i].iavcc = NULL;
           dev->desc_tbl[i].txskb = NULL;
        }
        i++;
     } /* while */
  }
  if (dev->ffL.tcq_rd == dev->host_tcq_wr)
     return 0xFFFF;

  /* Get the next available descriptor number from TCQ */
  desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);

  /* Skip zero entries and descriptors still in flight (non-zero
   * timestamp), advancing tcq_rd with wrap-around; bail out with
   * 0xFFFF if we catch up with the hardware's write pointer. */
  while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
     dev->ffL.tcq_rd += 2;
     if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
	dev->ffL.tcq_rd = dev->ffL.tcq_st;
     if (dev->ffL.tcq_rd == dev->host_tcq_wr)
        return 0xFFFF;
     desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
  }

  /* get system time */
  dev->desc_tbl[desc_num -1].timestamp = jiffies;
  return desc_num;
}
233 
/*
 * clear_lockup() - work-around for an ABR segmentation lock-up.
 *
 * Every 5th call per VC this samples the ABR VC's hardware state.  A
 * lock-up is declared when either (a) the VC sits in ABR_STATE with a
 * pending descriptor across a 10us delay, or (b) neither the last cell
 * slot nor the fraction advanced since the previous sample.  Recovery
 * rewrites MODE_REG_0, forces the VC state to idle, re-seeds the VCI
 * into the ABR schedule table and re-enables transmission.
 */
static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
  u_char          	foundLockUp;
  vcstatus_t		*vcstatus;
  u_short               *shd_tbl;
  u_short               tempCellSlot, tempFract;
  struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
  struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
  u_int  i;

  if (vcc->qos.txtp.traffic_class == ATM_ABR) {
     vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
     vcstatus->cnt++;
     foundLockUp = 0;
     /* Only probe the hardware on every 5th call for this VC. */
     if( vcstatus->cnt == 0x05 ) {
        abr_vc += vcc->vci;
	eabr_vc += vcc->vci;
	if( eabr_vc->last_desc ) {
	   if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
              /* Wait for 10 Micro sec */
              udelay(10);
	      if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
		 foundLockUp = 1;
           }
	   else {
              /* Not in ABR_STATE: locked up if neither the cell slot
               * nor the fraction moved since the last probe. */
	      tempCellSlot = abr_vc->last_cell_slot;
              tempFract    = abr_vc->fraction;
              if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
                         && (tempFract == dev->testTable[vcc->vci]->fract))
	         foundLockUp = 1;
              dev->testTable[vcc->vci]->lastTime = tempCellSlot;
              dev->testTable[vcc->vci]->fract = tempFract;
	   }
        } /* last descriptor */
        vcstatus->cnt = 0;
     } /* vcstatus->cnt */

     if (foundLockUp) {
        IF_ABR(printk("LOCK UP found\n");)
        /* NOTE(review): 0xFFFD presumably takes the segmentation engine
         * off-line (T_ONLINE is rewritten below) -- confirm against the
         * (i)Chip register documentation. */
	writew(0xFFFD, dev->seg_reg+MODE_REG_0);
        /* Wait for 10 Micro sec */
        udelay(10);
        abr_vc->status &= 0xFFF8;
        abr_vc->status |= 0x0001;  /* state is idle */
        /* Re-seed the VCI into the first free ABR schedule-table slot. */
	shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
	for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
	if (i < dev->num_vc)
           shd_tbl[i] = vcc->vci;
        else
           IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
        /* Bring the engine back on-line with TX-done interrupts armed. */
        writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
        writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
	vcstatus->cnt = 0;
     } /* foundLockUp */

  } /* if an ABR VC */


}
293 
294 /*
295 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
296 **
297 **  +----+----+------------------+-------------------------------+
298 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
299 **  +----+----+------------------+-------------------------------+
300 **
301 **    R = reserved (written as 0)
302 **    NZ = 0 if 0 cells/sec; 1 otherwise
303 **
304 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
305 */
306 static u16
307 cellrate_to_float(u32 cr)
308 {
309 
310 #define	NZ 		0x4000
311 #define	M_BITS		9		/* Number of bits in mantissa */
312 #define	E_BITS		5		/* Number of bits in exponent */
313 #define	M_MASK		0x1ff
314 #define	E_MASK		0x1f
315   u16   flot;
316   u32	tmp = cr & 0x00ffffff;
317   int 	i   = 0;
318   if (cr == 0)
319      return 0;
320   while (tmp != 1) {
321      tmp >>= 1;
322      i++;
323   }
324   if (i == M_BITS)
325      flot = NZ | (i << M_BITS) | (cr & M_MASK);
326   else if (i < M_BITS)
327      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
328   else
329      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
330   return flot;
331 }
332 
#if 0
/*
** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
** Inverse of cellrate_to_float(); currently compiled out (unused).
*/
static u32
float_to_cellrate(u16 rate)
{
  u32   exp, mantissa, cps;
  if ((rate & NZ) == 0)		/* NZ clear: zero cells/sec */
     return 0;
  exp = (rate >> M_BITS) & E_MASK;
  mantissa = rate & M_MASK;
  if (exp == 0)
     return 1;
  cps = (1 << M_BITS) | mantissa;	/* restore the implicit leading 1 */
  if (exp == M_BITS)
     cps = cps;		/* mantissa already at the right scale (no-op) */
  else if (exp > M_BITS)
     cps <<= (exp - M_BITS);
  else
     cps >>= (M_BITS - exp);
  return cps;
}
#endif
357 
/*
 * Fill @srv_p with the driver's default ABR service parameters (PCR
 * capped at the adapter's line rate).  The raw values are encoded into
 * hardware format by ia_open_abr_vc().  Field names follow the
 * standard ATM ABR parameter set (pcr/mcr/icr/tbe/frtt/rif/rdf/...).
 */
static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
  srv_p->class_type = ATM_ABR;
  srv_p->pcr        = dev->LineRate;	/* peak cell rate = line rate */
  srv_p->mcr        = 0;		/* no minimum rate guarantee */
  srv_p->icr        = 0x055cb7;		/* initial cell rate */
  srv_p->tbe        = 0xffffff;		/* transient buffer exposure */
  srv_p->frtt       = 0x3a;		/* fixed round-trip time */
  srv_p->rif        = 0xf;		/* rate increase factor */
  srv_p->rdf        = 0xb;		/* rate decrease factor */
  srv_p->nrm        = 0x4;		/* cells per forward RM cell */
  srv_p->trm        = 0x7;		/* time between RM cells */
  srv_p->cdf        = 0x3;		/* cutoff decrease factor */
  srv_p->adtf       = 50;		/* ACR decrease time factor */
}
372 
/*
 * ia_open_abr_vc() - program an ABR VC into the adapter's VC tables.
 *
 * @flag == 1: transmit side (FFRED) -- encode the service parameters
 * from @srv_p into the main VC table entry for @vcc->vci, converting
 * rates with cellrate_to_float().
 * @flag == 0: receive side (RFRED) -- mark the reassembly table entry
 * and fill the ABR VC table entry.
 *
 * Always returns 0: the parameter range checks that could fail are
 * compiled out below.
 */
static int
ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
                                                struct atm_vcc *vcc, u8 flag)
{
  f_vc_abr_entry  *f_abr_vc;
  r_vc_abr_entry  *r_abr_vc;
  u32		icr;
  u8		trm, nrm, crm;
  u16		adtf, air, *ptr16;
  f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
  f_abr_vc += vcc->vci;
  switch (flag) {
     case 1: /* FFRED initialization */
#if 0  /* sanity check */
       if (srv_p->pcr == 0)
          return INVALID_PCR;
       if (srv_p->pcr > dev->LineRate)
          srv_p->pcr = dev->LineRate;
       if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
	  return MCR_UNAVAILABLE;
       if (srv_p->mcr > srv_p->pcr)
	  return INVALID_MCR;
       if (!(srv_p->icr))
	  srv_p->icr = srv_p->pcr;
       if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
	  return INVALID_ICR;
       if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
	  return INVALID_TBE;
       if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
	  return INVALID_FRTT;
       if (srv_p->nrm > MAX_NRM)
	  return INVALID_NRM;
       if (srv_p->trm > MAX_TRM)
	  return INVALID_TRM;
       if (srv_p->adtf > MAX_ADTF)
          return INVALID_ADTF;
       else if (srv_p->adtf == 0)
	  srv_p->adtf = 1;
       if (srv_p->cdf > MAX_CDF)
	  return INVALID_CDF;
       if (srv_p->rif > MAX_RIF)
	  return INVALID_RIF;
       if (srv_p->rdf > MAX_RDF)
	  return INVALID_RDF;
#endif
       memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
       f_abr_vc->f_vc_type = ABR;
       nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
			          /* i.e 2**n = 2 << (n-1) */
       f_abr_vc->f_nrm = nrm << 8 | nrm;
       /* Scale TRM into the hardware's time unit; clamp to >= 1. */
       trm = 100000/(2 << (16 - srv_p->trm));
       if ( trm == 0) trm = 1;
       f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
       /* CRM = TBE / NRM, clamped to >= 1 and truncated to 8 bits. */
       crm = srv_p->tbe / nrm;
       if (crm == 0) crm = 1;
       f_abr_vc->f_crm = crm & 0xff;
       f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
       /* ICR is bounded by TBE/FRTT (in cells/sec); the two-branch form
        * avoids 32-bit overflow in the intermediate product. */
       icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
				((srv_p->tbe/srv_p->frtt)*1000000) :
				(1000000/(srv_p->frtt/srv_p->tbe)));
       f_abr_vc->f_icr = cellrate_to_float(icr);
       adtf = (10000 * srv_p->adtf)/8192;
       if (adtf == 0) adtf = 1;
       f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
       f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
       f_abr_vc->f_acr = f_abr_vc->f_icr;	/* start at the initial rate */
       f_abr_vc->f_status = 0x0042;
       break;
    case 0: /* RFRED initialization */
       ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
       *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
       r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
       r_abr_vc += vcc->vci;
       r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
       /* Additive increase rate = PCR scaled by the RIF exponent. */
       air = srv_p->pcr << (15 - srv_p->rif);
       if (air == 0) air = 1;
       r_abr_vc->r_air = cellrate_to_float(air);
       dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
       dev->sum_mcr	   += srv_p->mcr;
       dev->n_abr++;
       break;
    default:
       break;
  }
  return	0;
}
/*
 * ia_cbr_setup() - reserve CBR schedule-table bandwidth for a VC.
 *
 * Converts the VC's max_pcr into a number of schedule entries (in units
 * of dev->Granularity, rounding up once the excess over the lower
 * multiple exceeds a quarter of the step) and spreads that many copies
 * of the VCI as evenly as possible through the CBR schedule table,
 * probing outward from each ideal slot until a free entry is found.
 * Enables CBR scheduling in STPARMS when the first CBR VC appears.
 *
 * Returns 0 on success, -1 if no PCR was specified, -EBUSY when not
 * enough schedule entries remain.
 */
static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
   u32 rateLow=0, rateHigh, rate;
   int entries;
   struct ia_vcc *ia_vcc;

   int   idealSlot =0, testSlot, toBeAssigned, inc;
   u32   spacing;
   u16  *SchedTbl, *TstSchedTbl;
   u16  cbrVC, vcIndex;
   u32   fracSlot    = 0;
   u32   sp_mod      = 0;
   u32   sp_mod2     = 0;

   /* IpAdjustTrafficParams */
   if (vcc->qos.txtp.max_pcr <= 0) {
      IF_ERR(printk("PCR for CBR not defined\n");)
      return -1;
   }
   rate = vcc->qos.txtp.max_pcr;
   entries = rate / dev->Granularity;
   IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
                                entries, rate, dev->Granularity);)
   if (entries < 1)
      IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
   rateLow  =  entries * dev->Granularity;
   rateHigh = (entries + 1) * dev->Granularity;
   /* Round up when rate exceeds rateLow by more than 1/4 of the step. */
   if (3*(rate - rateLow) > (rateHigh - rate))
      entries++;
   if (entries > dev->CbrRemEntries) {
      IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
      IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
                                       entries, dev->CbrRemEntries);)
      return -EBUSY;
   }

   ia_vcc = INPH_IA_VCC(vcc);
   ia_vcc->NumCbrEntry = entries;
   dev->sum_mcr += entries * dev->Granularity;
   /* IaFFrednInsertCbrSched */
   // Starting at an arbitrary location, place the entries into the table
   // as smoothly as possible
   cbrVC   = 0;
   spacing = dev->CbrTotEntries / entries;
   sp_mod  = dev->CbrTotEntries % entries; // get modulo
   toBeAssigned = entries;
   fracSlot = 0;
   vcIndex  = vcc->vci;
   IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
   while (toBeAssigned)
   {
      // If this is the first time, start the table loading for this connection
      // as close to entryPoint as possible.
      if (toBeAssigned == entries)
      {
         idealSlot = dev->CbrEntryPt;
         dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
         if (dev->CbrEntryPt >= dev->CbrTotEntries)
            dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
      } else {
         idealSlot += (u32)(spacing + fracSlot); // Point to the next location
         // in the table that would be  smoothest
         fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
         sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
      }
      if (idealSlot >= (int)dev->CbrTotEntries)
         idealSlot -= dev->CbrTotEntries;
      // Continuously check around this ideal value until a null
      // location is encountered.
      SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
      inc = 0;
      testSlot = idealSlot;
      TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
      IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n",
                                testSlot, TstSchedTbl,toBeAssigned);)
      memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
      while (cbrVC)  // If another VC at this location, we have to keep looking
      {
          // Probe alternately below (idealSlot - inc) and above
          // (idealSlot + inc) the ideal slot, wrapping at both ends.
          inc++;
          testSlot = idealSlot - inc;
          if (testSlot < 0) { // Wrap if necessary
             testSlot += dev->CbrTotEntries;
             IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n",
                                                       SchedTbl,testSlot);)
          }
          TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
          if (!cbrVC)
             break;
          testSlot = idealSlot + inc;
          if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
             testSlot -= dev->CbrTotEntries;
             IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
             IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
                                            testSlot, toBeAssigned);)
          }
          // set table index and read in value
          TstSchedTbl = (u16*)(SchedTbl + testSlot);
          IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n",
                          TstSchedTbl,cbrVC,inc);)
          memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
       } /* while */
       // Move this VCI number into this location of the CBR Sched table.
       memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl));
       dev->CbrRemEntries--;
       toBeAssigned--;
   } /* while */

   /* IaFFrednCbrEnable */
   dev->NumEnabledCBR++;
   if (dev->NumEnabledCBR == 1) {
       writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
       IF_CBR(printk("CBR is enabled\n");)
   }
   return 0;
}
574 static void ia_cbrVc_close (struct atm_vcc *vcc) {
575    IADEV *iadev;
576    u16 *SchedTbl, NullVci = 0;
577    u32 i, NumFound;
578 
579    iadev = INPH_IA_DEV(vcc->dev);
580    iadev->NumEnabledCBR--;
581    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
582    if (iadev->NumEnabledCBR == 0) {
583       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
584       IF_CBR (printk("CBR support disabled\n");)
585    }
586    NumFound = 0;
587    for (i=0; i < iadev->CbrTotEntries; i++)
588    {
589       if (*SchedTbl == vcc->vci) {
590          iadev->CbrRemEntries++;
591          *SchedTbl = NullVci;
592          IF_CBR(NumFound++;)
593       }
594       SchedTbl++;
595    }
596    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
597 }
598 
599 static int ia_avail_descs(IADEV *iadev) {
600    int tmp = 0;
601    ia_hack_tcq(iadev);
602    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
603       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
604    else
605       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
606                    iadev->ffL.tcq_st) / 2;
607    return tmp;
608 }
609 
610 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
611 
612 static int ia_que_tx (IADEV *iadev) {
613    struct sk_buff *skb;
614    int num_desc;
615    struct atm_vcc *vcc;
616    num_desc = ia_avail_descs(iadev);
617 
618    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
619       if (!(vcc = ATM_SKB(skb)->vcc)) {
620          dev_kfree_skb_any(skb);
621          printk("ia_que_tx: Null vcc\n");
622          break;
623       }
624       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
625          dev_kfree_skb_any(skb);
626          printk("Free the SKB on closed vci %d \n", vcc->vci);
627          break;
628       }
629       if (ia_pkt_tx (vcc, skb)) {
630          skb_queue_head(&iadev->tx_backlog, skb);
631       }
632       num_desc--;
633    }
634    return 0;
635 }
636 
/*
 * ia_tx_poll() - release skbs for completed transmissions.
 *
 * Drains tx_return_q (filled by ia_hack_tcq() for rate-limited VCs).
 * For each returned entry, skbs are popped from the owning VC's
 * txing_skb list up to and including the matching one; an earlier,
 * non-matching skb that never got IA_TX_DONE is reported as lost.
 * Completed skbs are handed back through vcc->pop() when set (and the
 * skb has data), otherwise freed.  If the matching skb is not on the
 * list, the entry is put back on tx_return_q and the loop stops.
 * Finally ia_que_tx() is kicked to use the descriptors just freed.
 */
static void ia_tx_poll (IADEV *iadev) {
   struct atm_vcc *vcc = NULL;
   struct sk_buff *skb = NULL, *skb1 = NULL;
   struct ia_vcc *iavcc;
   IARTN_Q *  rtne;

   ia_hack_tcq(iadev);
   while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
       skb = rtne->data.txskb;
       if (!skb) {
           printk("ia_tx_poll: skb is null\n");
           /* NOTE(review): rtne is leaked on these early-out paths --
            * the normal path kfree()s it below.  Confirm intentional. */
           goto out;
       }
       vcc = ATM_SKB(skb)->vcc;
       if (!vcc) {
           printk("ia_tx_poll: vcc is null\n");
           dev_kfree_skb_any(skb);
	   goto out;
       }

       iavcc = INPH_IA_VCC(vcc);
       if (!iavcc) {
           printk("ia_tx_poll: iavcc is null\n");
           dev_kfree_skb_any(skb);
	   goto out;
       }

       /* Pop skbs queued before the completed one; any without
        * IA_TX_DONE set was never completed by the hardware. */
       skb1 = skb_dequeue(&iavcc->txing_skb);
       while (skb1 && (skb1 != skb)) {
          if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
             printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
          }
          IF_ERR(printk("Release the SKB not match\n");)
          if ((vcc->pop) && (skb1->len != 0))
          {
             vcc->pop(vcc, skb1);
             IF_EVENT(printk("Tansmit Done - skb 0x%lx return\n",
                                                          (long)skb1);)
          }
          else
             dev_kfree_skb_any(skb1);
          skb1 = skb_dequeue(&iavcc->txing_skb);
       }
       if (!skb1) {
          /* Matching skb not found: requeue the entry for a later poll. */
          IF_EVENT(printk("IA: Vci %d - skb not found requed\n",vcc->vci);)
          ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
          break;
       }
       if ((vcc->pop) && (skb->len != 0))
       {
          vcc->pop(vcc, skb);
          IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
       }
       else
          dev_kfree_skb_any(skb);
       kfree(rtne);
    }
    ia_que_tx(iadev);
out:
    return;
}
#if 0
/*
 * Write one 16-bit word to the serial NOVRAM/EEPROM at @addr by
 * bit-banging the interface through the NVRAM_*/CFG_* macros.
 * Currently compiled out; nothing in the driver writes the EEPROM.
 */
static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
{
        u32	t;
	int	i;
	/*
	 * Issue a command to enable writes to the NOVRAM
	 */
	NVRAM_CMD (EXTEND + EWEN);
	NVRAM_CLR_CE;
	/*
	 * issue the write command
	 */
	NVRAM_CMD(IAWRITE + addr);
	/*
	 * Send the data, starting with D15, then D14, and so on for 16 bits
	 */
	for (i=15; i>=0; i--) {
		NVRAM_CLKOUT (val & 0x8000);
		val <<= 1;
	}
	NVRAM_CLR_CE;
	CFG_OR(NVCE);
	/* Busy-wait for the device to signal write completion (NVDO). */
	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
	while (!(t & NVDO))
		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);

	NVRAM_CLR_CE;
	/*
	 * disable writes again (the original line was missing its ';'
	 * and would not have compiled had this block been re-enabled)
	 */
	NVRAM_CMD(EXTEND + EWDS);
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
}
#endif
734 
/*
 * ia_eeprom_get() - read one 16-bit word from the serial EEPROM.
 *
 * Bit-bangs a read command for @addr through the NVRAM_* macros, then
 * clocks in 16 data bits, MSB (D15) first, assembling them into the
 * returned word.
 */
static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
{
	u_short	val;
        u32	t;
	int	i;
	/*
	 * Read the first bit that was clocked with the falling edge of
	 * the last command data clock
	 */
	NVRAM_CMD(IAREAD + addr);
	/*
	 * Now read the rest of the bits, the next bit read is D14, then D13,
	 * and so on.
	 */
	val = 0;
	for (i=15; i>=0; i--) {
		NVRAM_CLKIN(t);		/* clocks one bit into t */
		val |= (t << i);
	}
	NVRAM_CLR_CE;
	CFG_AND(~NVDI);
	return val;
}
758 
/*
 * ia_hw_type() - derive adapter configuration from EEPROM word 25.
 *
 * The memory-size field scales the default TX/RX buffer counts (full
 * for 1M boards, /2 for 512K, /8 otherwise) unless the module
 * parameters were overridden.  The front-end field selects the PHY
 * type, from which the usable line rate (payload cells/sec, i.e. the
 * 26/27ths SONET overhead factor over 53-byte cells) is computed.
 */
static void ia_hw_type(IADEV *iadev) {
   u_short memType = ia_eeprom_get(iadev, 25);
   iadev->memType = memType;
   if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
      iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
      /* Only scale down when the user did not override the default. */
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 2;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 2;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   else {
      if (IA_TX_BUF == DFL_TX_BUFFERS)
        iadev->num_tx_desc = IA_TX_BUF / 8;
      else
        iadev->num_tx_desc = IA_TX_BUF;
      iadev->tx_buf_sz = IA_TX_BUF_SZ;
      if (IA_RX_BUF == DFL_RX_BUFFERS)
        iadev->num_rx_desc = IA_RX_BUF / 8;
      else
        iadev->num_rx_desc = IA_RX_BUF;
      iadev->rx_buf_sz = IA_RX_BUF_SZ;
   }
   /* RX packet RAM starts right after the TX buffer area. */
   iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
   IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
         iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
         iadev->rx_buf_sz, iadev->rx_pkt_ram);)

#if 0
   /* Dead code -- note it is also syntactically broken (unbalanced
    * braces) and must be repaired before ever being re-enabled. */
   if ((memType & FE_MASK) == FE_SINGLE_MODE) {
      iadev->phy_type = PHY_OC3C_S;
   else if ((memType & FE_MASK) == FE_UTP_OPTION)
      iadev->phy_type = PHY_UTP155;
   else
     iadev->phy_type = PHY_OC3C_M;
#endif

   iadev->phy_type = memType & FE_MASK;
   IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
                                         memType,iadev->phy_type);)
   if (iadev->phy_type == FE_25MBIT_PHY)
      iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_DS3_PHY)
      iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
   else if (iadev->phy_type == FE_E3_PHY)
      iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
   else
       iadev->LineRate = (u32)(ATM_OC3_PCR);
   IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)

}
819 
820 static u32 ia_phy_read32(struct iadev_priv *ia, unsigned int reg)
821 {
822 	return readl(ia->phy + (reg >> 2));
823 }
824 
825 static void ia_phy_write32(struct iadev_priv *ia, unsigned int reg, u32 val)
826 {
827 	writel(val, ia->phy + (reg >> 2));
828 }
829 
830 static void ia_frontend_intr(struct iadev_priv *iadev)
831 {
832 	u32 status;
833 
834 	if (iadev->phy_type & FE_25MBIT_PHY) {
835 		status = ia_phy_read32(iadev, MB25_INTR_STATUS);
836 		iadev->carrier_detect = (status & MB25_IS_GSB) ? 1 : 0;
837 	} else if (iadev->phy_type & FE_DS3_PHY) {
838 		ia_phy_read32(iadev, SUNI_DS3_FRM_INTR_STAT);
839 		status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
840 		iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;
841 	} else if (iadev->phy_type & FE_E3_PHY) {
842 		ia_phy_read32(iadev, SUNI_E3_FRM_MAINT_INTR_IND);
843 		status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
844 		iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
845 	} else {
846 		status = ia_phy_read32(iadev, SUNI_RSOP_STATUS);
847 		iadev->carrier_detect = (status & SUNI_LOSV) ? 0 : 1;
848 	}
849 
850 	printk(KERN_INFO "IA: SUNI carrier %s\n",
851 		iadev->carrier_detect ? "detected" : "lost signal");
852 }
853 
854 static void ia_mb25_init(struct iadev_priv *iadev)
855 {
856 #if 0
857    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
858 #endif
859 	ia_phy_write32(iadev, MB25_MASTER_CTRL, MB25_MC_DRIC | MB25_MC_DREC);
860 	ia_phy_write32(iadev, MB25_DIAG_CONTROL, 0);
861 
862 	iadev->carrier_detect =
863 		(ia_phy_read32(iadev, MB25_INTR_STATUS) & MB25_IS_GSB) ? 1 : 0;
864 }
865 
/* A (register offset, value) pair for table-driven PHY initialization;
 * consumed by ia_phy_write(). */
struct ia_reg {
	u16 reg;	/* byte offset of the PHY register */
	u16 val;	/* value to write */
};
870 
871 static void ia_phy_write(struct iadev_priv *iadev,
872 			 const struct ia_reg *regs, int len)
873 {
874 	while (len--) {
875 		ia_phy_write32(iadev, regs->reg, regs->val);
876 		regs++;
877 	}
878 }
879 
/*
 * ia_suni_pm7345_init_ds3 - DS3-specific setup for the PM7345
 * S/UNI-PDH front end.
 *
 * Samples the framer status first to seed carrier detection, then
 * programs the DS3 framer/transmitter registers from a fixed table.
 * Register values are magic constants from the original vendor code;
 * see the PM7345 data sheet for bit meanings.
 */
static void ia_suni_pm7345_init_ds3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_ds3_init [] = {
		{ SUNI_DS3_FRM_INTR_ENBL,	0x17 },
		{ SUNI_DS3_FRM_CFG,		0x01 },
		{ SUNI_DS3_TRAN_CFG,		0x01 },
		{ SUNI_CONFIG,			0 },
		{ SUNI_SPLR_CFG,		0 },
		{ SUNI_SPLT_CFG,		0 }
	};
	u32 status;

	/* LOSV set => loss of signal => no carrier */
	status = ia_phy_read32(iadev, SUNI_DS3_FRM_STAT);
	iadev->carrier_detect = (status & SUNI_DS3_LOSV) ? 0 : 1;

	ia_phy_write(iadev, suni_ds3_init, ARRAY_SIZE(suni_ds3_init));
}
897 
/*
 * ia_suni_pm7345_init_e3 - E3-specific setup for the PM7345
 * S/UNI-PDH front end.
 *
 * Samples the framing status first to seed carrier detection, then
 * programs the E3 framer/transmitter registers from a fixed table.
 * Register values are magic constants from the original vendor code;
 * see the PM7345 data sheet for bit meanings.
 */
static void ia_suni_pm7345_init_e3(struct iadev_priv *iadev)
{
	static const struct ia_reg suni_e3_init [] = {
		{ SUNI_E3_FRM_FRAM_OPTIONS,		0x04 },
		{ SUNI_E3_FRM_MAINT_OPTIONS,		0x20 },
		{ SUNI_E3_FRM_FRAM_INTR_ENBL,		0x1d },
		{ SUNI_E3_FRM_MAINT_INTR_ENBL,		0x30 },
		{ SUNI_E3_TRAN_STAT_DIAG_OPTIONS,	0 },
		{ SUNI_E3_TRAN_FRAM_OPTIONS,		0x01 },
		{ SUNI_CONFIG,				SUNI_PM7345_E3ENBL },
		{ SUNI_SPLR_CFG,			0x41 },
		{ SUNI_SPLT_CFG,			0x41 }
	};
	u32 status;

	/* LOS set => loss of signal => no carrier */
	status = ia_phy_read32(iadev, SUNI_E3_FRM_FRAM_INTR_IND_STAT);
	iadev->carrier_detect = (status & SUNI_E3_LOS) ? 0 : 1;
	ia_phy_write(iadev, suni_e3_init, ARRAY_SIZE(suni_e3_init));
}
917 
918 static void ia_suni_pm7345_init(struct iadev_priv *iadev)
919 {
920 	static const struct ia_reg suni_init [] = {
921 		/* Enable RSOP loss of signal interrupt. */
922 		{ SUNI_INTR_ENBL,		0x28 },
923 		/* Clear error counters. */
924 		{ SUNI_ID_RESET,		0 },
925 		/* Clear "PMCTST" in master test register. */
926 		{ SUNI_MASTER_TEST,		0 },
927 
928 		{ SUNI_RXCP_CTRL,		0x2c },
929 		{ SUNI_RXCP_FCTRL,		0x81 },
930 
931 		{ SUNI_RXCP_IDLE_PAT_H1,	0 },
932 		{ SUNI_RXCP_IDLE_PAT_H2,	0 },
933 		{ SUNI_RXCP_IDLE_PAT_H3,	0 },
934 		{ SUNI_RXCP_IDLE_PAT_H4,	0x01 },
935 
936 		{ SUNI_RXCP_IDLE_MASK_H1,	0xff },
937 		{ SUNI_RXCP_IDLE_MASK_H2,	0xff },
938 		{ SUNI_RXCP_IDLE_MASK_H3,	0xff },
939 		{ SUNI_RXCP_IDLE_MASK_H4,	0xfe },
940 
941 		{ SUNI_RXCP_CELL_PAT_H1,	0 },
942 		{ SUNI_RXCP_CELL_PAT_H2,	0 },
943 		{ SUNI_RXCP_CELL_PAT_H3,	0 },
944 		{ SUNI_RXCP_CELL_PAT_H4,	0x01 },
945 
946 		{ SUNI_RXCP_CELL_MASK_H1,	0xff },
947 		{ SUNI_RXCP_CELL_MASK_H2,	0xff },
948 		{ SUNI_RXCP_CELL_MASK_H3,	0xff },
949 		{ SUNI_RXCP_CELL_MASK_H4,	0xff },
950 
951 		{ SUNI_TXCP_CTRL,		0xa4 },
952 		{ SUNI_TXCP_INTR_EN_STS,	0x10 },
953 		{ SUNI_TXCP_IDLE_PAT_H5,	0x55 }
954 	};
955 
956 	if (iadev->phy_type & FE_DS3_PHY)
957 		ia_suni_pm7345_init_ds3(iadev);
958 	else
959 		ia_suni_pm7345_init_e3(iadev);
960 
961 	ia_phy_write(iadev, suni_init, ARRAY_SIZE(suni_init));
962 
963 	ia_phy_write32(iadev, SUNI_CONFIG, ia_phy_read32(iadev, SUNI_CONFIG) &
964 		~(SUNI_PM7345_LLB | SUNI_PM7345_CLB |
965 		  SUNI_PM7345_DLB | SUNI_PM7345_PLB));
966 #ifdef __SNMP__
967    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
968 #endif /* __SNMP__ */
969    return;
970 }
971 
972 
973 /***************************** IA_LIB END *****************************/
974 
#ifdef CONFIG_ATM_IA_DEBUG
static int tcnter = 0;
/*
 * xdump - print a hex + ASCII dump of LENGTH bytes at CP via printk,
 * 16 bytes per line in groups of four, each line prefixed by PREFIX.
 *
 * prntBuf[120] is large enough for the longest line produced here as
 * long as PREFIX stays short (worst case is roughly strlen(prefix) +
 * 69 characters plus the terminator) — assumes callers pass a short
 * prefix such as "RX: ".
 *
 * Fix: the pad loop's group-separator sprintf() did not advance pBuf,
 * so the separator was overwritten by the following blanks and the
 * ASCII column of a short final line was misaligned relative to full
 * lines.  It now advances pBuf like every other write.
 */
static void xdump( u_char*  cp, int  length, char*  prefix )
{
    int col, count;
    u_char prntBuf[120];
    u_char*  pBuf = prntBuf;
    count = 0;
    while(count < length){
        pBuf += sprintf( pBuf, "%s", prefix );
        for(col = 0;count + col < length && col < 16; col++){
            if (col != 0 && (col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
        }
        while(col++ < 16){      /* pad end of buffer with blanks */
            if ((col % 4) == 0)
                pBuf += sprintf( pBuf, " " );
            pBuf += sprintf( pBuf, "   " );
        }
        pBuf += sprintf( pBuf, "  " );
        for(col = 0;count + col < length && col < 16; col++){
            if (isprint((int)cp[count + col]))
                pBuf += sprintf( pBuf, "%c", cp[count + col] );
            else
                pBuf += sprintf( pBuf, "." );
                }
        printk("%s\n", prntBuf);
        count += col;
        pBuf = prntBuf;
    }

}  /* close xdump(... */
#endif /* CONFIG_ATM_IA_DEBUG */
1009 
1010 
/* Head of the driver's chain of ATM adapter devices.  (Fix: statics
 * are zero-initialized by the C runtime; the explicit "= NULL" is
 * redundant and flagged by checkpatch.) */
static struct atm_dev *ia_boards;

/* The base addresses of the on-board control memories scale with the
 * total memory size (iadev->mem), expressed here in 128 KiB units. */
#define ACTUAL_RAM_BASE \
	RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_SEG_RAM_BASE \
	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
#define ACTUAL_REASS_RAM_BASE \
	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1019 
1020 
1021 /*-- some utilities and memory allocation stuff will come here -------------*/
1022 
/*
 * desc_dbg - dump the transmit-complete queue (TCQ) state for
 * debugging: hardware write pointer, the driver's cached pointers,
 * every TCQ slot between the start and end addresses, and the
 * timestamp of each transmit descriptor.  Output goes to printk only;
 * no state is modified.
 */
static void desc_dbg(IADEV *iadev) {

  u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
  u32 i;
  void __iomem *tmp;
  // regval = readl((u32)ia_cmds->maddr);
  tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
  printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
                     tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
                     readw(iadev->seg_ram+tcq_wr_ptr-2));
  printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr,
                   iadev->ffL.tcq_rd);
  tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
  tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
  printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
  i = 0;
  /* walk the TCQ, one 16-bit descriptor number per slot */
  while (tcq_st_ptr != tcq_ed_ptr) {
      tmp = iadev->seg_ram+tcq_st_ptr;
      printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
      tcq_st_ptr += 2;
  }
  for(i=0; i <iadev->num_tx_desc; i++)
      printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
}
1047 
1048 
1049 /*----------------------------- Receiving side stuff --------------------------*/
1050 
/*
 * rx_excp_rcvd - handler for "exception received" reassembly events.
 *
 * The entire body is compiled out: per the original comment, draining
 * the exception queue here generated too many exception interrupts,
 * so the event is effectively ignored.  The disabled code is kept as
 * documentation of the exception-queue register layout.
 */
static void rx_excp_rcvd(struct atm_dev *dev)
{
#if 0 /* closing the receiving size will cause too many excp int */
  IADEV *iadev;
  u_short state;
  u_short excpq_rd_ptr;
  //u_short *ptr;
  int vci, error = 1;
  iadev = INPH_IA_DEV(dev);
  state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
  { printk("state = %x \n", state);
        excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
 printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
        if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
            IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
        // TODO: update exception stat
	vci = readw(iadev->reass_ram+excpq_rd_ptr);
	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
        // pwang_test
	excpq_rd_ptr += 4;
	if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
 	    excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
	writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
        state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
  }
#endif
}
1079 
1080 static void free_desc(struct atm_dev *dev, int desc)
1081 {
1082 	IADEV *iadev;
1083 	iadev = INPH_IA_DEV(dev);
1084         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1085 	iadev->rfL.fdq_wr +=2;
1086 	if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1087 		iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;
1088 	writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1089 }
1090 
1091 
/*
 * rx_pkt - service one entry of the reassembly Packet Complete Queue.
 *
 * Pops the next descriptor number from the PCQ, validates the
 * descriptor and its owning VCC, checks the hardware status bits for
 * receive errors, and builds a DMA list entry (DLE) to copy the
 * received PDU from adapter packet memory into a newly allocated skb.
 * On any failure the descriptor is returned to the free queue.
 *
 * Returns 0 on success, -EINVAL if the PCQ is empty, -1 on a bad
 * descriptor or missing VCC.
 */
static int rx_pkt(struct atm_dev *dev)
{
	IADEV *iadev;
	struct atm_vcc *vcc;
	unsigned short status;
	struct rx_buf_desc __iomem *buf_desc_ptr;
	int desc;
	struct dle* wr_ptr;
	int len;
	struct sk_buff *skb;
	u_int buf_addr, dma_addr;

	iadev = INPH_IA_DEV(dev);
	/* queue empty when our cached read pointer catches the HW writer */
	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
	{
   	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
	    return -EINVAL;
	}
	/* mask 1st 3 bits to get the actual descno. */
	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
        IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
                                    iadev->reass_ram, iadev->rfL.pcq_rd, desc);
              printk(" pcq_wr_ptr = 0x%x\n",
                               readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
	/* update the read pointer  - maybe we shud do this in the end*/
	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
		iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
	else
		iadev->rfL.pcq_rd += 2;
	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);

	/* get the buffer desc entry.
		update stuff. - doesn't seem to be any update necessary
	*/
	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
	/* make the ptr point to the corresponding buffer desc entry */
	buf_desc_ptr += desc;
	/* reject descriptor 0, out-of-range descriptors, and bogus
	 * VC indices before touching rx_open[] */
        if (!desc || (desc > iadev->num_rx_desc) ||
                      ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
            free_desc(dev, desc);
            IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
            return -1;
        }
	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
	if (!vcc)
	{
                free_desc(dev, desc);
		printk("IA: null vcc, drop PDU\n");
		return -1;
	}


	/* might want to check the status bits for errors */
	status = (u_short) (buf_desc_ptr->desc_mode);
	if (status & (RX_CER | RX_PTE | RX_OFL))
	{
                atomic_inc(&vcc->stats->rx_err);
		IF_ERR(printk("IA: bad packet, dropping it");)
                if (status & RX_CER) {
                    IF_ERR(printk(" cause: packet CRC error\n");)
                }
                else if (status & RX_PTE) {
                    IF_ERR(printk(" cause: packet time out\n");)
                }
                else {
                    IF_ERR(printk(" cause: buffer overflow\n");)
                }
		goto out_free_desc;
	}

	/*
		build DLE.
	*/

	/* PDU length is the distance the adapter's DMA pointer advanced
	 * past the buffer start */
	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
	len = dma_addr - buf_addr;
        if (len > iadev->rx_buf_sz) {
           printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
           atomic_inc(&vcc->stats->rx_err);
	   goto out_free_desc;
        }

	/* NOTE(review): the indentation below is misleading but the
	 * behavior is intended — on allocation failure the goto must
	 * always run so the descriptor is freed; the printk alone is
	 * conditional on a low (control) VCI. */
        if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
           if (vcc->vci < 32)
              printk("Drop control packets\n");
	      goto out_free_desc;
        }
	skb_put(skb,len);
        // pwang_test
        ATM_SKB(skb)->vcc = vcc;
        ATM_DESC(skb) = desc;
	skb_queue_tail(&iadev->rx_dma_q, skb);

	/* Build the DLE structure */
	wr_ptr = iadev->rx_dle_q.write;
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      len, DMA_FROM_DEVICE);
	wr_ptr->local_pkt_addr = buf_addr;
	wr_ptr->bytes = len;	/* We don't know this do we ?? */
	wr_ptr->mode = DMA_INT_ENABLE;

	/* shud take care of wrap around here too. */
        if(++wr_ptr == iadev->rx_dle_q.end)
             wr_ptr = iadev->rx_dle_q.start;
	iadev->rx_dle_q.write = wr_ptr;
	udelay(1);
	/* Increment transaction counter */
	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
out:	return 0;
out_free_desc:
        free_desc(dev, desc);
        goto out;
}
1206 
/*
 * rx_intr - dispatch reassembly (receive-side) interrupts.
 *
 * Reads the reassembly interrupt status and, per bit:
 *  - RX_PKT_RCVD:  drain the Packet Complete Queue via rx_pkt();
 *  - RX_FREEQ_EMPT: record when the free queue first emptied and, if
 *    no packet completed within ~50 jiffies, recycle every descriptor
 *    as a recovery measure;
 *  - RX_EXCP_RCVD / RX_RAW_RCVD: currently only logged.
 */
static void rx_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  u_short status;
  u_short state, i;

  iadev = INPH_IA_DEV(dev);
  status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
  IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
  if (status & RX_PKT_RCVD)
  {
	/* do something */
	/* Basically recvd an interrupt for receiving a packet.
	A descriptor would have been written to the packet complete
	queue. Get all the descriptors and set up dma to move the
	packets till the packet complete queue is empty..
	*/
	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
        IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
	while(!(state & PCQ_EMPTY))
	{
             rx_pkt(dev);
	     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
	}
        iadev->rxing = 1;
  }
  if (status & RX_FREEQ_EMPT)
  {
     /* first emptiness: snapshot the packet counter and time so the
      * stall detector below has a baseline */
     if (iadev->rxing) {
        iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
        iadev->rx_tmp_jif = jiffies;
        iadev->rxing = 0;
     }
     else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) &&
               ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
        /* receive path appears stalled: force every descriptor back
         * onto the free queue and re-mask free-queue/exception ints */
        for (i = 1; i <= iadev->num_rx_desc; i++)
               free_desc(dev, i);
printk("Test logic RUN!!!!\n");
        writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing = 1;
     }
     IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
  }

  if (status & RX_EXCP_RCVD)
  {
	/* probably need to handle the exception queue also. */
	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
	rx_excp_rcvd(dev);
  }


  if (status & RX_RAW_RCVD)
  {
	/* need to handle the raw incoming cells. This deepnds on
	whether we have programmed to receive the raw cells or not.
	Else ignore. */
	IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)
  }
}
1267 
1268 
/*
 * rx_dle_intr - handle completion of receive DMA list entries.
 *
 * Walks the rx DLE ring from our cached read pointer up to the
 * hardware's current list position, and for each completed entry:
 * dequeues the matching skb from rx_dma_q, frees its adapter
 * descriptor, unmaps the DMA buffer, validates the AAL5 trailer
 * length, trims the skb and pushes it up to the VCC.  Finally, if
 * receive interrupts were masked because the free queue emptied,
 * re-enables them once descriptors are available again.
 */
static void rx_dle_intr(struct atm_dev *dev)
{
  IADEV *iadev;
  struct atm_vcc *vcc;
  struct sk_buff *skb;
  int desc;
  u_short state;
  struct dle *dle, *cur_dle;
  u_int dle_lp;
  int len;
  iadev = INPH_IA_DEV(dev);

  /* free all the dles done, that is just update our own dle read pointer
	- do we really need to do this. Think not. */
  /* DMA is done, just get all the recevie buffers from the rx dma queue
	and push them up to the higher layer protocol. Also free the desc
	associated with the buffer. */
  dle = iadev->rx_dle_q.read;
  dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
  cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
  while(dle != cur_dle)
  {
      /* free the DMAed skb */
      skb = skb_dequeue(&iadev->rx_dma_q);
      if (!skb)
         goto INCR_DLE;
      desc = ATM_DESC(skb);
      free_desc(dev, desc);

      if (!(len = skb->len))
      {
          printk("rx_dle_intr: skb len 0\n");
	  dev_kfree_skb_any(skb);
      }
      else
      {
          struct cpcs_trailer *trailer;
          u_short length;
          struct ia_vcc *ia_vcc;

	  /* NOTE(review): this unmaps via rx_dle_q.write rather than
	   * the DLE being retired — presumably a long-standing quirk;
	   * confirm before changing. */
	  dma_unmap_single(&iadev->pci->dev, iadev->rx_dle_q.write->sys_pkt_addr,
			   len, DMA_FROM_DEVICE);
          /* no VCC related housekeeping done as yet. lets see */
          vcc = ATM_SKB(skb)->vcc;
	  if (!vcc) {
	      printk("IA: null vcc\n");
              dev_kfree_skb_any(skb);
              goto INCR_DLE;
          }
          ia_vcc = INPH_IA_VCC(vcc);
          if (ia_vcc == NULL)
          {
             atomic_inc(&vcc->stats->rx_err);
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
           }
          // get real pkt length  pwang_test
          trailer = (struct cpcs_trailer*)((u_char *)skb->data +
                                 skb->len - sizeof(*trailer));
	  length = swap_byte_order(trailer->length);
	  /* sanity-check the AAL5 length against both the configured
	   * buffer size and what was actually DMAed */
          if ((length > iadev->rx_buf_sz) || (length >
                              (skb->len - sizeof(struct cpcs_trailer))))
          {
             atomic_inc(&vcc->stats->rx_err);
             IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
                                                            length, skb->len);)
             atm_return(vcc, skb->truesize);
             dev_kfree_skb_any(skb);
             goto INCR_DLE;
          }
          skb_trim(skb, length);

	  /* Display the packet */
	  IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len);
          xdump(skb->data, skb->len, "RX: ");
          printk("\n");)

	  IF_RX(printk("rx_dle_intr: skb push");)
	  vcc->push(vcc,skb);
	  atomic_inc(&vcc->stats->rx);
          iadev->rx_pkt_cnt++;
      }
INCR_DLE:
      if (++dle == iadev->rx_dle_q.end)
    	  dle = iadev->rx_dle_q.start;
  }
  iadev->rx_dle_q.read = dle;

  /* if the interrupts are masked because there were no free desc available,
		unmask them now. */
  if (!iadev->rxing) {
     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
     if (!(state & FREEQ_EMPTY)) {
        state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
        writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
                                      iadev->reass_reg+REASS_MASK_REG);
        iadev->rxing++;
     }
  }
}
1370 
1371 
/*
 * open_rx - set up the receive side for a newly opened VCC.
 *
 * Marks the VCI valid in the reassembly VC lookup table, initializes
 * ABR state when the VCC is ABR (rejected on 25 Mbit PHYs, which do
 * not support ABR), seeds the reassembly table entry for UBR/other
 * classes, and records the VCC in iadev->rx_open[] so incoming PDUs
 * can be matched back to it.  Returns 0 on success or -EINVAL.
 */
static int open_rx(struct atm_vcc *vcc)
{
	IADEV *iadev;
	u_short __iomem *vc_table;
	u_short __iomem *reass_ptr;
	IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)

	if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
	iadev = INPH_IA_DEV(vcc->dev);
        if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
           if (iadev->phy_type & FE_25MBIT_PHY) {
               printk("IA:  ABR not support\n");
               return -EINVAL;
           }
        }
	/* Make only this VCI in the vc table valid and let all
		others be invalid entries */
	vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
	vc_table += vcc->vci;
	/* mask the last 6 bits and OR it with 3 for 1K VCs */

	/* NOTE(review): low bits left clear here mark the entry valid;
	 * invalid entries are written as (i << 6) | 15 in rx_init() */
        *vc_table = vcc->vci << 6;
	/* Also keep a list of open rx vcs so that we can attach them with
		incoming PDUs later. */
	if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
                                (vcc->qos.txtp.traffic_class == ATM_ABR))
	{
                srv_cls_param_t srv_p;
                init_abr_vc(iadev, &srv_p);
                ia_open_abr_vc(iadev, &srv_p, vcc, 0);
	}
       	else {  /* for UBR  later may need to add CBR logic */
        	reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
           	reass_ptr += vcc->vci;
           	*reass_ptr = NO_AAL5_PKT;
       	}

	if (iadev->rx_open[vcc->vci])
		printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
			vcc->dev->number, vcc->vci);
	iadev->rx_open[vcc->vci] = vcc;
	return 0;
}
1415 
1416 static int rx_init(struct atm_dev *dev)
1417 {
1418 	IADEV *iadev;
1419 	struct rx_buf_desc __iomem *buf_desc_ptr;
1420 	unsigned long rx_pkt_start = 0;
1421 	void *dle_addr;
1422 	struct abr_vc_table  *abr_vc_table;
1423 	u16 *vc_table;
1424 	u16 *reass_table;
1425 	int i,j, vcsize_sel;
1426 	u_short freeq_st_adr;
1427 	u_short *freeq_start;
1428 
1429 	iadev = INPH_IA_DEV(dev);
1430   //    spin_lock_init(&iadev->rx_lock);
1431 
1432 	/* Allocate 4k bytes - more aligned than needed (4k boundary) */
1433 	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
1434 				      &iadev->rx_dle_dma, GFP_KERNEL);
1435 	if (!dle_addr)  {
1436 		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1437 		goto err_out;
1438 	}
1439 	iadev->rx_dle_q.start = (struct dle *)dle_addr;
1440 	iadev->rx_dle_q.read = iadev->rx_dle_q.start;
1441 	iadev->rx_dle_q.write = iadev->rx_dle_q.start;
1442 	iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1443 	/* the end of the dle q points to the entry after the last
1444 	DLE that can be used. */
1445 
1446 	/* write the upper 20 bits of the start address to rx list address register */
1447 	/* We know this is 32bit bus addressed so the following is safe */
1448 	writel(iadev->rx_dle_dma & 0xfffff000,
1449 	       iadev->dma + IPHASE5575_RX_LIST_ADDR);
1450 	IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n",
1451                       iadev->dma+IPHASE5575_TX_LIST_ADDR,
1452                       readl(iadev->dma + IPHASE5575_TX_LIST_ADDR));
1453 	printk("Rx Dle list addr: 0x%p value: 0x%0x\n",
1454                       iadev->dma+IPHASE5575_RX_LIST_ADDR,
1455                       readl(iadev->dma + IPHASE5575_RX_LIST_ADDR));)
1456 
1457 	writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
1458 	writew(0, iadev->reass_reg+MODE_REG);
1459 	writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
1460 
1461 	/* Receive side control memory map
1462 	   -------------------------------
1463 
1464 		Buffer descr	0x0000 (736 - 23K)
1465 		VP Table	0x5c00 (256 - 512)
1466 		Except q	0x5e00 (128 - 512)
1467 		Free buffer q	0x6000 (1K - 2K)
1468 		Packet comp q	0x6800 (1K - 2K)
1469 		Reass Table	0x7000 (1K - 2K)
1470 		VC Table	0x7800 (1K - 2K)
1471 		ABR VC Table	0x8000 (1K - 32K)
1472 	*/
1473 
1474 	/* Base address for Buffer Descriptor Table */
1475 	writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
1476 	/* Set the buffer size register */
1477 	writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
1478 
1479 	/* Initialize each entry in the Buffer Descriptor Table */
1480         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1481 	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1482 	memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1483 	buf_desc_ptr++;
1484 	rx_pkt_start = iadev->rx_pkt_ram;
1485 	for(i=1; i<=iadev->num_rx_desc; i++)
1486 	{
1487 		memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1488 		buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
1489 		buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
1490 		buf_desc_ptr++;
1491 		rx_pkt_start += iadev->rx_buf_sz;
1492 	}
1493 	IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);)
1494         i = FREE_BUF_DESC_Q*iadev->memSize;
1495 	writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE);
1496         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1497         writew(i+iadev->num_rx_desc*sizeof(u_short),
1498                                          iadev->reass_reg+FREEQ_ED_ADR);
1499         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1500         writew(i+iadev->num_rx_desc*sizeof(u_short),
1501                                         iadev->reass_reg+FREEQ_WR_PTR);
1502 	/* Fill the FREEQ with all the free descriptors. */
1503 	freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
1504 	freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
1505 	for(i=1; i<=iadev->num_rx_desc; i++)
1506 	{
1507 		*freeq_start = (u_short)i;
1508 		freeq_start++;
1509 	}
1510 	IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);)
1511         /* Packet Complete Queue */
1512         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1513         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1514         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1515         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1516         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1517 
1518         /* Exception Queue */
1519         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1520         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1521         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
1522                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1523         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1524         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
1525 
1526     	/* Load local copy of FREEQ and PCQ ptrs */
1527         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1528        	iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1529 	iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1530 	iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1531         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1532 	iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1533 	iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1534 	iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1535 
1536         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
1537               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
1538               iadev->rfL.pcq_wr);)
1539 	/* just for check - no VP TBL */
1540 	/* VP Table */
1541 	/* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1542 	/* initialize VP Table for invalid VPIs
1543 		- I guess we can write all 1s or 0x000f in the entire memory
1544 		  space or something similar.
1545 	*/
1546 
1547 	/* This seems to work and looks right to me too !!! */
1548         i =  REASS_TABLE * iadev->memSize;
1549 	writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
1550  	/* initialize Reassembly table to I don't know what ???? */
1551 	reass_table = (u16 *)(iadev->reass_ram+i);
1552         j = REASS_TABLE_SZ * iadev->memSize;
1553 	for(i=0; i < j; i++)
1554 		*reass_table++ = NO_AAL5_PKT;
1555        i = 8*1024;
1556        vcsize_sel =  0;
1557        while (i != iadev->num_vc) {
1558           i /= 2;
1559           vcsize_sel++;
1560        }
1561        i = RX_VC_TABLE * iadev->memSize;
1562        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1563        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1564         j = RX_VC_TABLE_SZ * iadev->memSize;
1565 	for(i = 0; i < j; i++)
1566 	{
1567 		/* shift the reassembly pointer by 3 + lower 3 bits of
1568 		vc_lkup_base register (=3 for 1K VCs) and the last byte
1569 		is those low 3 bits.
1570 		Shall program this later.
1571 		*/
1572 		*vc_table = (i << 6) | 15;	/* for invalid VCI */
1573 		vc_table++;
1574 	}
1575         /* ABR VC table */
1576         i =  ABR_VC_TABLE * iadev->memSize;
1577         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1578 
1579         i = ABR_VC_TABLE * iadev->memSize;
1580 	abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
1581         j = REASS_TABLE_SZ * iadev->memSize;
1582         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1583     	for(i = 0; i < j; i++) {
1584 		abr_vc_table->rdf = 0x0003;
1585              	abr_vc_table->air = 0x5eb1;
1586 	       	abr_vc_table++;
1587         }
1588 
1589 	/* Initialize other registers */
1590 
1591 	/* VP Filter Register set for VC Reassembly only */
1592 	writew(0xff00, iadev->reass_reg+VP_FILTER);
1593         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1594 	writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1595 
1596 	/* Packet Timeout Count  related Registers :
1597 	   Set packet timeout to occur in about 3 seconds
1598 	   Set Packet Aging Interval count register to overflow in about 4 us
1599  	*/
1600         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1601 
1602         i = (j >> 6) & 0xFF;
1603         j += 2 * (j - 1);
1604         i |= ((j << 2) & 0xFF00);
1605         writew(i, iadev->reass_reg+TMOUT_RANGE);
1606 
1607         /* initiate the desc_tble */
1608         for(i=0; i<iadev->num_tx_desc;i++)
1609             iadev->desc_tbl[i].timestamp = 0;
1610 
1611 	/* to clear the interrupt status register - read it */
1612 	readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
1613 
1614 	/* Mask Register - clear it */
1615 	writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
1616 
1617 	skb_queue_head_init(&iadev->rx_dma_q);
1618 	iadev->rx_free_desc_qhead = NULL;
1619 
1620 	iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL);
1621 	if (!iadev->rx_open) {
1622 		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1623 		dev->number);
1624 		goto err_free_dle;
1625 	}
1626 
1627         iadev->rxing = 1;
1628         iadev->rx_pkt_cnt = 0;
1629 	/* Mode Register */
1630 	writew(R_ONLINE, iadev->reass_reg+MODE_REG);
1631 	return 0;
1632 
1633 err_free_dle:
1634 	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1635 			  iadev->rx_dle_dma);
1636 err_out:
1637 	return -ENOMEM;
1638 }
1639 
1640 
1641 /*
1642 	The memory map suggested in appendix A and the coding for it.
1643 	Keeping it around just in case we change our mind later.
1644 
1645 		Buffer descr	0x0000 (128 - 4K)
1646 		UBR sched	0x1000 (1K - 4K)
1647 		UBR Wait q	0x2000 (1K - 4K)
		Commn queues	0x3000 Packet Ready, Transmit comp(0x3100)
1649 					(128 - 256) each
1650 		extended VC	0x4000 (1K - 8K)
1651 		ABR sched	0x6000	and ABR wait queue (1K - 2K) each
1652 		CBR sched	0x7000 (as needed)
1653 		VC table	0x8000 (1K - 32K)
1654 */
1655 
1656 static void tx_intr(struct atm_dev *dev)
1657 {
1658 	IADEV *iadev;
1659 	unsigned short status;
1660         unsigned long flags;
1661 
1662 	iadev = INPH_IA_DEV(dev);
1663 
1664 	status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1665         if (status & TRANSMIT_DONE){
1666 
1667            IF_EVENT(printk("Tansmit Done Intr logic run\n");)
1668            spin_lock_irqsave(&iadev->tx_lock, flags);
1669            ia_tx_poll(iadev);
1670            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1671            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1672            if (iadev->close_pending)
1673                wake_up(&iadev->close_wait);
1674         }
1675 	if (status & TCQ_NOT_EMPTY)
1676 	{
1677 	    IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
1678 	}
1679 }
1680 
/*
 * tx_dle_intr - handle completion of transmit DMA list entries.
 *
 * Walks the tx DLE ring from our cached read pointer up to the
 * hardware's current list position; for each completed entry it
 * dequeues the matching skb, unmaps the DMA buffer (only for the
 * first of each skb+trailer DLE pair), and either pops/frees the skb
 * or, for rate-limited VCs, parks it on the VCC's txing_skb list for
 * flow control.  Runs entirely under tx_lock.
 *
 * NOTE(review): the null-vcc/null-iavcc early returns drop out before
 * iadev->tx_dle_q.read is updated, so already-processed entries would
 * be rescanned on the next interrupt — presumably harmless since the
 * skbs were dequeued, but confirm before relying on it.
 */
static void tx_dle_intr(struct atm_dev *dev)
{
        IADEV *iadev;
        struct dle *dle, *cur_dle;
        struct sk_buff *skb;
        struct atm_vcc *vcc;
        struct ia_vcc  *iavcc;
        u_int dle_lp;
        unsigned long flags;

        iadev = INPH_IA_DEV(dev);
        spin_lock_irqsave(&iadev->tx_lock, flags);
        dle = iadev->tx_dle_q.read;
        dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
                                        (sizeof(struct dle)*DLE_ENTRIES - 1);
        cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
        while (dle != cur_dle)
        {
            /* free the DMAed skb */
            skb = skb_dequeue(&iadev->tx_dma_q);
            if (!skb) break;

	    /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
	    if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
		dma_unmap_single(&iadev->pci->dev, dle->sys_pkt_addr, skb->len,
				 DMA_TO_DEVICE);
	    }
            vcc = ATM_SKB(skb)->vcc;
            if (!vcc) {
                  printk("tx_dle_intr: vcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);

                  return;
            }
            iavcc = INPH_IA_VCC(vcc);
            if (!iavcc) {
                  printk("tx_dle_intr: iavcc is null\n");
		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
                  dev_kfree_skb_any(skb);
                  return;
            }
            if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
               if ((vcc->pop) && (skb->len != 0))
               {
                 vcc->pop(vcc, skb);
               }
               else {
                 dev_kfree_skb_any(skb);
               }
            }
            else { /* Hold the rate-limited skb for flow control */
               IA_SKB_STATE(skb) |= IA_DLED;
               skb_queue_tail(&iavcc->txing_skb, skb);
            }
            IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);)
            if (++dle == iadev->tx_dle_q.end)
                 dle = iadev->tx_dle_q.start;
        }
        iadev->tx_dle_q.read = dle;
        spin_unlock_irqrestore(&iadev->tx_lock, flags);
}
1743 
1744 static int open_tx(struct atm_vcc *vcc)
1745 {
1746 	struct ia_vcc *ia_vcc;
1747 	IADEV *iadev;
1748 	struct main_vc *vc;
1749 	struct ext_vc *evc;
1750         int ret;
1751 	IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
1752 	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1753 	iadev = INPH_IA_DEV(vcc->dev);
1754 
1755         if (iadev->phy_type & FE_25MBIT_PHY) {
1756            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1757                printk("IA:  ABR not support\n");
1758                return -EINVAL;
1759            }
1760 	  if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1761                printk("IA:  CBR not support\n");
1762                return -EINVAL;
1763           }
1764         }
1765         ia_vcc =  INPH_IA_VCC(vcc);
1766         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1767         if (vcc->qos.txtp.max_sdu >
1768                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1769            printk("IA:  SDU size over (%d) the configured SDU size %d\n",
1770 		  vcc->qos.txtp.max_sdu,iadev->tx_buf_sz);
1771 	   vcc->dev_data = NULL;
1772            kfree(ia_vcc);
1773            return -EINVAL;
1774         }
1775 	ia_vcc->vc_desc_cnt = 0;
1776         ia_vcc->txing = 1;
1777 
1778         /* find pcr */
1779         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
1780            vcc->qos.txtp.pcr = iadev->LineRate;
1781         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1782            vcc->qos.txtp.pcr = iadev->LineRate;
1783         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
1784            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1785         if (vcc->qos.txtp.pcr > iadev->LineRate)
1786              vcc->qos.txtp.pcr = iadev->LineRate;
1787         ia_vcc->pcr = vcc->qos.txtp.pcr;
1788 
1789         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1790         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1791         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1792         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
1793         if (ia_vcc->pcr < iadev->rate_limit)
1794            skb_queue_head_init (&ia_vcc->txing_skb);
1795         if (ia_vcc->pcr < iadev->rate_limit) {
1796 	   struct sock *sk = sk_atm(vcc);
1797 
1798 	   if (vcc->qos.txtp.max_sdu != 0) {
1799                if (ia_vcc->pcr > 60000)
1800                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1801                else if (ia_vcc->pcr > 2000)
1802                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1803                else
1804                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1805            }
1806            else
1807              sk->sk_sndbuf = 24576;
1808         }
1809 
1810 	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
1811 	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
1812 	vc += vcc->vci;
1813 	evc += vcc->vci;
1814 	memset((caddr_t)vc, 0, sizeof(*vc));
1815 	memset((caddr_t)evc, 0, sizeof(*evc));
1816 
1817 	/* store the most significant 4 bits of vci as the last 4 bits
1818 		of first part of atm header.
1819 	   store the last 12 bits of vci as first 12 bits of the second
1820 		part of the atm header.
1821 	*/
1822 	evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
1823 	evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1824 
1825 	/* check the following for different traffic classes */
1826 	if (vcc->qos.txtp.traffic_class == ATM_UBR)
1827 	{
1828 		vc->type = UBR;
1829                 vc->status = CRC_APPEND;
1830 		vc->acr = cellrate_to_float(iadev->LineRate);
1831                 if (vcc->qos.txtp.pcr > 0)
1832                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1833                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1834                                              vcc->qos.txtp.max_pcr,vc->acr);)
1835 	}
1836 	else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1837 	{       srv_cls_param_t srv_p;
1838 		IF_ABR(printk("Tx ABR VCC\n");)
1839                 init_abr_vc(iadev, &srv_p);
1840                 if (vcc->qos.txtp.pcr > 0)
1841                    srv_p.pcr = vcc->qos.txtp.pcr;
1842                 if (vcc->qos.txtp.min_pcr > 0) {
1843                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1844                    if (tmpsum > iadev->LineRate)
1845                        return -EBUSY;
1846                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1847                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1848                 }
1849                 else srv_p.mcr = 0;
1850                 if (vcc->qos.txtp.icr)
1851                    srv_p.icr = vcc->qos.txtp.icr;
1852                 if (vcc->qos.txtp.tbe)
1853                    srv_p.tbe = vcc->qos.txtp.tbe;
1854                 if (vcc->qos.txtp.frtt)
1855                    srv_p.frtt = vcc->qos.txtp.frtt;
1856                 if (vcc->qos.txtp.rif)
1857                    srv_p.rif = vcc->qos.txtp.rif;
1858                 if (vcc->qos.txtp.rdf)
1859                    srv_p.rdf = vcc->qos.txtp.rdf;
1860                 if (vcc->qos.txtp.nrm_pres)
1861                    srv_p.nrm = vcc->qos.txtp.nrm;
1862                 if (vcc->qos.txtp.trm_pres)
1863                    srv_p.trm = vcc->qos.txtp.trm;
1864                 if (vcc->qos.txtp.adtf_pres)
1865                    srv_p.adtf = vcc->qos.txtp.adtf;
1866                 if (vcc->qos.txtp.cdf_pres)
1867                    srv_p.cdf = vcc->qos.txtp.cdf;
1868                 if (srv_p.icr > srv_p.pcr)
1869                    srv_p.icr = srv_p.pcr;
1870                 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n",
1871                                                       srv_p.pcr, srv_p.mcr);)
1872 		ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1873 	} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1874                 if (iadev->phy_type & FE_25MBIT_PHY) {
1875                     printk("IA:  CBR not support\n");
1876                     return -EINVAL;
1877                 }
1878                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1879                    IF_CBR(printk("PCR is not available\n");)
1880                    return -1;
1881                 }
1882                 vc->type = CBR;
1883                 vc->status = CRC_APPEND;
1884                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
1885                     return ret;
1886                 }
1887        }
1888 	else
1889            printk("iadev:  Non UBR, ABR and CBR traffic not supportedn");
1890 
1891         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1892 	IF_EVENT(printk("ia open_tx returning \n");)
1893 	return 0;
1894 }
1895 
1896 
/*
 * tx_init() - one-time initialisation of the transmit (segmentation) side.
 *
 * Allocates the tx DLE ring and the per-descriptor CPCS trailer buffers,
 * carves up the segmentation control RAM (buffer descriptor table, TCQ,
 * PRQ, CBR/UBR/ABR schedule tables and wait queues, main/extended VC
 * tables) and programs the corresponding SAR registers.
 *
 * Returns 0 on success or -ENOMEM on any allocation failure; the goto
 * chain at the bottom unwinds every partial allocation in reverse order.
 */
static int tx_init(struct atm_dev *dev)
{
	IADEV *iadev;
	struct tx_buf_desc *buf_desc_ptr;
	unsigned int tx_pkt_start;
	void *dle_addr;
	int i;
	u_short tcq_st_adr;
	u_short *tcq_start;
	u_short prq_st_adr;
	u_short *prq_start;
	struct main_vc *vc;
	struct ext_vc *evc;
        u_short tmp16;
        u32 vcsize_sel;

	iadev = INPH_IA_DEV(dev);
        spin_lock_init(&iadev->tx_lock);

	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
                                readw(iadev->seg_reg+SEG_MASK_REG));)

	/* Allocate 4k (boundary aligned) bytes */
	dle_addr = dma_alloc_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE,
				      &iadev->tx_dle_dma, GFP_KERNEL);
	if (!dle_addr)  {
		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
		goto err_out;
	}
	iadev->tx_dle_q.start = (struct dle*)dle_addr;
	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
	iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES);

	/* write the upper 20 bits of the start address to tx list address register */
	writel(iadev->tx_dle_dma & 0xfffff000,
	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
	writew(0, iadev->seg_reg+MODE_REG_0);
	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
        iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
        iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
        iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;

	/*
	   Transmit side control memory map
	   --------------------------------
	 Buffer descr 	0x0000 (128 - 4K)
	 Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
					(512 - 1K) each
					TCQ - 4K, PRQ - 5K
	 CBR Table 	0x1800 (as needed) - 6K
	 UBR Table	0x3000 (1K - 4K) - 12K
	 UBR Wait queue	0x4000 (1K - 4K) - 16K
	 ABR sched	0x5000	and ABR wait queue (1K - 2K) each
				ABR Tbl - 20K, ABR Wq - 22K
	 extended VC	0x6000 (1K - 8K) - 24K
	 VC Table	0x8000 (1K - 32K) - 32K

	Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
	and Wait q, which can be allotted later.
	*/

	/* Buffer Descriptor Table Base address */
	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);

	/* initialize each entry in the buffer descriptor table */
	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
	buf_desc_ptr++;
	tx_pkt_start = TX_PACKET_RAM;
	/* descriptor 0 is reserved (zeroed above); real descriptors are
	   1..num_tx_desc, each owning one tx_buf_sz window of packet RAM */
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
		buf_desc_ptr->desc_mode = AAL5;
		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
		buf_desc_ptr++;
		tx_pkt_start += iadev->tx_buf_sz;
	}
        /* Host-side array of CPCS trailer buffers, one per descriptor. */
        iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
        if (!iadev->tx_buf) {
            printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
	    goto err_free_dle;
        }
       	for (i= 0; i< iadev->num_tx_desc; i++)
       	{
	    struct cpcs_trailer *cpcs;

       	    cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
            if(!cpcs) {
		printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
		goto err_free_tx_bufs;
            }
	    iadev->tx_buf[i].cpcs = cpcs;
	    iadev->tx_buf[i].dma_addr = dma_map_single(&iadev->pci->dev,
						       cpcs,
						       sizeof(*cpcs),
						       DMA_TO_DEVICE);
        }
        iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
                                   sizeof(struct desc_tbl_t), GFP_KERNEL);
	if (!iadev->desc_tbl) {
		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
		goto err_free_all_tx_bufs;
	}

	/* Communication Queues base address */
        i = TX_COMP_Q * iadev->memSize;
	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);

	/* Transmit Complete Queue */
	writew(i, iadev->seg_reg+TCQ_ST_ADR);
	writew(i, iadev->seg_reg+TCQ_RD_PTR);
	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
        writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
                                              iadev->seg_reg+TCQ_ED_ADR);
	/* Fill the TCQ with all the free descriptors. */
	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*tcq_start = (u_short)i;
		tcq_start++;
	}

	/* Packet Ready Queue */
        i = PKT_RDY_Q * iadev->memSize;
	writew(i, iadev->seg_reg+PRQ_ST_ADR);
	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
                                              iadev->seg_reg+PRQ_ED_ADR);
	writew(i, iadev->seg_reg+PRQ_RD_PTR);
	writew(i, iadev->seg_reg+PRQ_WR_PTR);

        /* Load local copy of PRQ and TCQ ptrs */
        iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
 	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;

	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;

	/* Just for safety initializing the queue to have desc 1 always */
	/* Fill the PRQ with all the free descriptors. */
	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
	for(i=1; i<=iadev->num_tx_desc; i++)
	{
		*prq_start = (u_short)0;	/* desc 1 in all entries */
		prq_start++;
	}
	/* CBR Table */
        IF_INIT(printk("Start CBR Init\n");)
#if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
        writew(0,iadev->seg_reg+CBR_PTR_BASE);
#else /* Charlie's logic is wrong ? */
        tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
        IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
        writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
#endif

        IF_INIT(printk("value in register = 0x%x\n",
                                   readw(iadev->seg_reg+CBR_PTR_BASE));)
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
        IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
                                        readw(iadev->seg_reg+CBR_TAB_BEG));)
        writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR;
        tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
        writew(tmp16, iadev->seg_reg+CBR_TAB_END);
        IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n",
               iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
        IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
          readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
          readw(iadev->seg_reg+CBR_TAB_END+1));)

        /* Initialize the CBR Schedualing Table */
        memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
                                                          0, iadev->num_vc*6);
        iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
        iadev->CbrEntryPt = 0;
        iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
        iadev->NumEnabledCBR = 0;

	/* UBR scheduling Table and wait queue */
	/* initialize all bytes of UBR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- UBR Table size is 4K
		- UBR wait queue is 4K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	*/

        /* vcsize_sel encodes num_vc as a power-of-two divisor of 8K.
           NOTE(review): loops forever if num_vc is not a power of two
           <= 8192 -- ia_init() only ever sets 1024 or 4096, so this
           holds today; confirm before reuse. */
        vcsize_sel = 0;
        i = 8*1024;
        while (i != iadev->num_vc) {
          i /= 2;
          vcsize_sel++;
        }

        i = MAIN_VC_TABLE * iadev->memSize;
        writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
        i =  EXT_VC_TABLE * iadev->memSize;
        writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
        i = UBR_SCHED_TABLE * iadev->memSize;
        writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
        i = UBR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
 	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
                                                       0, iadev->num_vc*8);
	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
	/* initialize all bytes of ABR scheduler table and wait queue to 0
		- SCHEDSZ is 1K (# of entries).
		- ABR Table size is 2K
		- ABR wait queue is 2K
	   since the table and wait queues are contiguous, all the bytes
	   can be initialized by one memeset.
	*/
        i = ABR_SCHED_TABLE * iadev->memSize;
        writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
        i = ABR_WAIT_Q * iadev->memSize;
        writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);

        i = ABR_SCHED_TABLE*iadev->memSize;
	memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
        /* NOTE(review): allocates num_vc pointers assuming
           sizeof(long) >= sizeof(struct testTable_t *) -- true on the
           supported arches, but sizeof(*iadev->testTable) would be
           self-documenting. */
        iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL);
        if (!iadev->testTable) {
           printk("Get freepage  failed\n");
	   goto err_free_desc_tbl;
        }
	for(i=0; i<iadev->num_vc; i++)
	{
		memset((caddr_t)vc, 0, sizeof(*vc));
		memset((caddr_t)evc, 0, sizeof(*evc));
                iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
						GFP_KERNEL);
		if (!iadev->testTable[i])
			goto err_free_test_tables;
              	iadev->testTable[i]->lastTime = 0;
 		iadev->testTable[i]->fract = 0;
                iadev->testTable[i]->vc_status = VC_UBR;
		vc++;
		evc++;
	}

	/* Other Initialization */

	/* Max Rate Register */
        if (iadev->phy_type & FE_25MBIT_PHY) {
	   writew(RATE25, iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        }
        else {
	   writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
	   writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
        }
	/* Set Idle Header Reigisters to be sure */
	writew(0, iadev->seg_reg+IDLEHEADHI);
	writew(0, iadev->seg_reg+IDLEHEADLO);

	/* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
        writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);

        iadev->close_pending = 0;
        init_waitqueue_head(&iadev->close_wait);
        init_waitqueue_head(&iadev->timeout_wait);
	skb_queue_head_init(&iadev->tx_dma_q);
	ia_init_rtn_q(&iadev->tx_return_q);

	/* RM Cell Protocol ID and Message Type */
	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
        skb_queue_head_init (&iadev->tx_backlog);

	/* Mode Register 1 */
	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);

	/* Mode Register 0 */
	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);

	/* Interrupt Status Register - read to clear */
	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);

	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
        writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
        iadev->tx_pkt_cnt = 0;
        iadev->rate_limit = iadev->LineRate / 3;

	return 0;

/* Error unwind: each label releases everything allocated before the
   failure point, in reverse order of acquisition. */
err_free_test_tables:
	while (--i >= 0)
		kfree(iadev->testTable[i]);
	kfree(iadev->testTable);
err_free_desc_tbl:
	kfree(iadev->desc_tbl);
err_free_all_tx_bufs:
	i = iadev->num_tx_desc;
err_free_tx_bufs:
	while (--i >= 0) {
		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;

		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
		kfree(desc->cpcs);
	}
	kfree(iadev->tx_buf);
err_free_dle:
	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
			  iadev->tx_dle_dma);
err_out:
	return -ENOMEM;
}
2214 
/*
 * ia_int() - shared interrupt handler for the adapter.
 *
 * Repeatedly reads the bus status register until no cause bits (low
 * 7 bits) remain, dispatching each cause in turn: reassembly events
 * (rx_intr), rx/tx DLE completions (rx_dle_intr/tx_dle_intr -- these
 * are acknowledged by writing the status bit back before dispatch),
 * segmentation events (tx_intr) and front-end events
 * (ia_frontend_intr).  Returns IRQ_HANDLED if any cause was seen,
 * IRQ_NONE otherwise, so the kernel can detect a stuck shared line.
 */
static irqreturn_t ia_int(int irq, void *dev_id)
{
   struct atm_dev *dev;
   IADEV *iadev;
   unsigned int status;
   int handled = 0;

   dev = dev_id;
   iadev = INPH_IA_DEV(dev);
   while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
   {
	handled = 1;
        IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
	if (status & STAT_REASSINT)
	{
	   /* reassembly (receive) side has work pending */
	   IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
	   rx_intr(dev);
	}
	if (status & STAT_DLERINT)
	{
	   /* Clear this bit by writing a 1 to it. */
	   writel(STAT_DLERINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
	   rx_dle_intr(dev);
	}
	if (status & STAT_SEGINT)
	{
	   /* segmentation (transmit) side has work pending */
           IF_EVENT(printk("IA: tx_intr \n");)
	   tx_intr(dev);
	}
	if (status & STAT_DLETINT)
	{
	   /* tx DLE completion -- ack by writing the bit back */
	   writel(STAT_DLETINT, iadev->reg + IPHASE5575_BUS_STATUS_REG);
	   tx_dle_intr(dev);
	}
	if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
	{
           /* only the front-end cause is acted upon; ERRINT/MARKINT
              are observed but intentionally ignored here */
           if (status & STAT_FEINT)
               ia_frontend_intr(iadev);
	}
   }
   return IRQ_RETVAL(handled);
}
2259 
2260 
2261 
2262 /*----------------------------- entries --------------------------------*/
2263 static int get_esi(struct atm_dev *dev)
2264 {
2265 	IADEV *iadev;
2266 	int i;
2267 	u32 mac1;
2268 	u16 mac2;
2269 
2270 	iadev = INPH_IA_DEV(dev);
2271 	mac1 = cpu_to_be32(le32_to_cpu(readl(
2272 				iadev->reg+IPHASE5575_MAC1)));
2273 	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2274 	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2275 	for (i=0; i<MAC1_LEN; i++)
2276 		dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2277 
2278 	for (i=0; i<MAC2_LEN; i++)
2279 		dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2280 	return 0;
2281 }
2282 
2283 static int reset_sar(struct atm_dev *dev)
2284 {
2285 	IADEV *iadev;
2286 	int i, error = 1;
2287 	unsigned int pci[64];
2288 
2289 	iadev = INPH_IA_DEV(dev);
2290 	for(i=0; i<64; i++)
2291 	  if ((error = pci_read_config_dword(iadev->pci,
2292 				i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2293   	      return error;
2294 	writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2295 	for(i=0; i<64; i++)
2296 	  if ((error = pci_write_config_dword(iadev->pci,
2297 					i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
2298 	    return error;
2299 	udelay(5);
2300 	return 0;
2301 }
2302 
2303 
/*
 * ia_init() - per-device probe-time initialisation.
 *
 * Reads the PCI configuration (BAR 0, IRQ), infers the board variant
 * from the BAR size (1MB => 4K VCs / memSize 4, 256KB => 1K VCs /
 * memSize 1), ioremaps the board, computes all the register and RAM
 * base pointers in the IADEV, reads the ESI and finally resets the SAR.
 * Returns 0 on success, a negative errno (or 1 on SAR-reset failure,
 * kept as-is) otherwise; the mapping is undone on every failure path
 * after ioremap.
 */
static int ia_init(struct atm_dev *dev)
{
	IADEV *iadev;
	unsigned long real_base;
	void __iomem *base;
	unsigned short command;
	int error, i;

	/* The device has been identified and registered. Now we read
	   necessary configuration info like memory base address,
	   interrupt number etc */

	IF_INIT(printk(">ia_init\n");)
	dev->ci_range.vpi_bits = 0;
	dev->ci_range.vci_bits = NR_VCI_LD;

	iadev = INPH_IA_DEV(dev);
	real_base = pci_resource_start (iadev->pci, 0);
	iadev->irq = iadev->pci->irq;

	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
	if (error) {
		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
				dev->number,error);
		return -EINVAL;
	}
	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
			dev->number, iadev->pci->revision, real_base, iadev->irq);)

	/* find mapping size of board */

	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);

        /* BAR size identifies the board variant (control memory size). */
        if (iadev->pci_map_size == 0x100000){
          iadev->num_vc = 4096;
	  dev->ci_range.vci_bits = NR_VCI_4K_LD;
          iadev->memSize = 4;
        }
        else if (iadev->pci_map_size == 0x40000) {
          iadev->num_vc = 1024;
          iadev->memSize = 1;
        }
        else {
           printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
           return -EINVAL;
        }
	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)

	/* enable bus mastering */
	pci_set_master(iadev->pci);

	/*
	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
	 */
	udelay(10);

	/* mapping the physical address to a virtual address in address space */
	base = ioremap(real_base,iadev->pci_map_size);

	if (!base)
	{
		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
			    dev->number);
		return -ENOMEM;
	}
	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
			dev->number, iadev->pci->revision, base, iadev->irq);)

	/* filling the iphase dev structure */
	iadev->mem = iadev->pci_map_size /2;
	iadev->real_base = real_base;
	iadev->base = base;

	/* Bus Interface Control Registers */
	iadev->reg = base + REG_BASE;
	/* Segmentation Control Registers */
	iadev->seg_reg = base + SEG_BASE;
	/* Reassembly Control Registers */
	iadev->reass_reg = base + REASS_BASE;
	/* Front end/ DMA control registers */
	iadev->phy = base + PHY_BASE;
	iadev->dma = base + PHY_BASE;
	/* RAM - Segmentation RAm and Reassembly RAM */
	iadev->ram = base + ACTUAL_RAM_BASE;
	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;

	/* lets print out the above */
	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
          iadev->reg,iadev->seg_reg,iadev->reass_reg,
          iadev->phy, iadev->ram, iadev->seg_ram,
          iadev->reass_ram);)

	/* lets try reading the MAC address */
	error = get_esi(dev);
	if (error) {
	  iounmap(iadev->base);
	  return error;
	}
        printk("IA: ");
	for (i=0; i < ESI_LEN; i++)
                printk("%s%02X",i ? "-" : "",dev->esi[i]);
        printk("\n");

        /* reset SAR */
        if (reset_sar(dev)) {
	   iounmap(iadev->base);
           printk("IA: reset SAR fail, please try again\n");
           return 1;
        }
	return 0;
}
2416 
2417 static void ia_update_stats(IADEV *iadev) {
2418     if (!iadev->carrier_detect)
2419         return;
2420     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2421     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2422     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2423     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2424     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2425     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2426     return;
2427 }
2428 
2429 static void ia_led_timer(unsigned long arg) {
2430  	unsigned long flags;
2431   	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2432         u_char i;
2433         static u32 ctrl_reg;
2434         for (i = 0; i < iadev_count; i++) {
2435            if (ia_dev[i]) {
2436 	      ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2437 	      if (blinking[i] == 0) {
2438 		 blinking[i]++;
2439                  ctrl_reg &= (~CTRL_LED);
2440                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2441                  ia_update_stats(ia_dev[i]);
2442               }
2443               else {
2444 		 blinking[i] = 0;
2445 		 ctrl_reg |= CTRL_LED;
2446                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2447                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2448                  if (ia_dev[i]->close_pending)
2449                     wake_up(&ia_dev[i]->close_wait);
2450                  ia_tx_poll(ia_dev[i]);
2451                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2452               }
2453            }
2454         }
2455 	mod_timer(&ia_timer, jiffies + HZ / 4);
2456  	return;
2457 }
2458 
2459 static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2460 	unsigned long addr)
2461 {
2462 	writel(value, INPH_IA_DEV(dev)->phy+addr);
2463 }
2464 
2465 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2466 {
2467 	return readl(INPH_IA_DEV(dev)->phy+addr);
2468 }
2469 
2470 static void ia_free_tx(IADEV *iadev)
2471 {
2472 	int i;
2473 
2474 	kfree(iadev->desc_tbl);
2475 	for (i = 0; i < iadev->num_vc; i++)
2476 		kfree(iadev->testTable[i]);
2477 	kfree(iadev->testTable);
2478 	for (i = 0; i < iadev->num_tx_desc; i++) {
2479 		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2480 
2481 		dma_unmap_single(&iadev->pci->dev, desc->dma_addr,
2482 				 sizeof(*desc->cpcs), DMA_TO_DEVICE);
2483 		kfree(desc->cpcs);
2484 	}
2485 	kfree(iadev->tx_buf);
2486 	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2487 			  iadev->tx_dle_dma);
2488 }
2489 
2490 static void ia_free_rx(IADEV *iadev)
2491 {
2492 	kfree(iadev->rx_open);
2493 	dma_free_coherent(&iadev->pci->dev, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2494 			  iadev->rx_dle_dma);
2495 }
2496 
/*
 * ia_start() - bring the adapter online.
 *
 * Requests the (shared) IRQ, enables PCI memory + bus-master access,
 * programs the bus control register (burst sizes, interrupt masks),
 * runs tx_init()/rx_init(), releases the front-end reset and finally
 * initialises whichever PHY the board carries (25Mbit, DS3/E3 or SUNI).
 * Returns 0 on success or a negative errno; the goto chain unwinds
 * rx/tx state and the IRQ on failure.
 */
static int ia_start(struct atm_dev *dev)
{
	IADEV *iadev;
	int error;
	unsigned char phy;
	u32 ctrl_reg;
	IF_EVENT(printk(">ia_start\n");)
	iadev = INPH_IA_DEV(dev);
        if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
                printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
                    dev->number, iadev->irq);
		error = -EAGAIN;
		goto err_out;
        }
	/* enabling memory + master */
        if ((error = pci_write_config_word(iadev->pci,
				PCI_COMMAND,
				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
	{
                printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
                    "master (0x%x)\n",dev->number, error);
		error = -EIO;
		goto err_free_irq;
        }
	udelay(10);

	/* Maybe we should reset the front end, initialize Bus Interface Control
		Registers and see. */

	IF_INIT(printk("Bus ctrl reg: %08x\n",
                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
	/* Keep only the LED and front-end-reset bits, then enable all
	   burst sizes and unmask every interrupt source. */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
			| CTRL_B8
			| CTRL_B16
			| CTRL_B32
			| CTRL_B48
			| CTRL_B64
			| CTRL_B128
			| CTRL_ERRMASK
			| CTRL_DLETMASK		/* shud be removed l8r */
			| CTRL_DLERMASK
			| CTRL_SEGMASK
			| CTRL_REASSMASK
			| CTRL_FEMASK
			| CTRL_CSPREEMPT;

       writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);

	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                           readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
	   printk("Bus status reg after init: %08x\n",
                            readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)

        ia_hw_type(iadev);
	error = tx_init(dev);
	if (error)
		goto err_free_irq;
	error = rx_init(dev);
	if (error)
		goto err_free_tx;

	/* Release the front-end reset now that tx/rx are set up. */
	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
       	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
                               readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
        phy = 0; /* resolve compiler complaint */
        IF_INIT (
	if ((phy=ia_phy_get(dev,0)) == 0x30)
		printk("IA: pm5346,rev.%d\n",phy&0x0f);
	else
		printk("IA: utopia,rev.%0x\n",phy);)

	/* PHY-specific bring-up, selected by the detected front end. */
	if (iadev->phy_type &  FE_25MBIT_PHY)
           ia_mb25_init(iadev);
	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
           ia_suni_pm7345_init(iadev);
	else {
		error = suni_init(dev);
		if (error)
			goto err_free_rx;
		if (dev->phy->start) {
			error = dev->phy->start(dev);
			if (error)
				goto err_free_rx;
		}
		/* Get iadev->carrier_detect status */
		ia_frontend_intr(iadev);
	}
	return 0;

err_free_rx:
	ia_free_rx(iadev);
err_free_tx:
	ia_free_tx(iadev);
err_free_irq:
	free_irq(iadev->irq, dev);
err_out:
	return error;
}
2598 
/*
 * ia_close - tear down a VCC.
 *
 * Transmit side: removes this VC's packets from the shared tx backlog,
 * waits (bounded by a PCR-scaled timeout) for its outstanding transmit
 * descriptors to complete, and returns any reserved ABR/CBR bandwidth.
 * Receive side: invalidates the reassembly and VC table entries in
 * adapter RAM, drains pending rx DMA completions and drops the VC from
 * the rx_open[] lookup.  Finally frees the per-VC private state.
 */
static void ia_close(struct atm_vcc *vcc)
{
	DEFINE_WAIT(wait);
        u16 *vc_table;
        IADEV *iadev;
        struct ia_vcc *ia_vcc;
        struct sk_buff *skb = NULL;
        struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
        unsigned long closetime, flags;

        iadev = INPH_IA_DEV(vcc->dev);
        ia_vcc = INPH_IA_VCC(vcc);
	if (!ia_vcc) return;

        IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
                                              ia_vcc->vc_desc_cnt,vcc->vci);)
	clear_bit(ATM_VF_READY,&vcc->flags);
        skb_queue_head_init (&tmp_tx_backlog);
        skb_queue_head_init (&tmp_vcc_backlog);
        if (vcc->qos.txtp.traffic_class != ATM_NONE) {
           /* Give in-flight transmits a moment to drain first. */
           iadev->close_pending++;
	   prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
	   schedule_timeout(50);
	   finish_wait(&iadev->timeout_wait, &wait);
           spin_lock_irqsave(&iadev->tx_lock, flags);
           /* Pull this VC's packets out of the shared backlog; everything
            * else is requeued in its original order via tmp_tx_backlog. */
           while((skb = skb_dequeue(&iadev->tx_backlog))) {
              if (ATM_SKB(skb)->vcc == vcc){
                 if (vcc->pop) vcc->pop(vcc, skb);
                 else dev_kfree_skb_any(skb);
              }
              else
                 skb_queue_tail(&tmp_tx_backlog, skb);
           }
           while((skb = skb_dequeue(&tmp_tx_backlog)))
             skb_queue_tail(&iadev->tx_backlog, skb);
           IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);)
           /* Timeout scales inversely with the VC's cell rate.
            * NOTE(review): divides by ia_vcc->pcr - assumes pcr != 0 for
            * any VC with a tx traffic class; confirm against open_tx(). */
           closetime = 300000 / ia_vcc->pcr;
           if (closetime == 0)
              closetime = 1;
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
           /* Wait for all of this VC's descriptors to complete. */
           wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
           spin_lock_irqsave(&iadev->tx_lock, flags);
           iadev->close_pending--;
           iadev->testTable[vcc->vci]->lastTime = 0;
           iadev->testTable[vcc->vci]->fract = 0;
           iadev->testTable[vcc->vci]->vc_status = VC_UBR;
           /* Return any bandwidth reservation taken at open time. */
           if (vcc->qos.txtp.traffic_class == ATM_ABR) {
              if (vcc->qos.txtp.min_pcr > 0)
                 iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
           }
           if (vcc->qos.txtp.traffic_class == ATM_CBR) {
              ia_vcc = INPH_IA_VCC(vcc);
              iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
              ia_cbrVc_close (vcc);
           }
           spin_unlock_irqrestore(&iadev->tx_lock, flags);
        }

        if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
           // reset reass table
           vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
           vc_table += vcc->vci;
           *vc_table = NO_AAL5_PKT;
           // reset vc table
           vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
           vc_table += vcc->vci;
           *vc_table = (vcc->vci << 6) | 15;
           if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
              struct abr_vc_table __iomem *abr_vc_table =
                                (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
              abr_vc_table +=  vcc->vci;
              abr_vc_table->rdf = 0x0003;
              abr_vc_table->air = 0x5eb1;
           }
           // Drain the packets
           rx_dle_intr(vcc->dev);
           iadev->rx_open[vcc->vci] = NULL;
        }
	kfree(INPH_IA_VCC(vcc));
        ia_vcc = NULL;
        vcc->dev_data = NULL;
        clear_bit(ATM_VF_ADDR,&vcc->flags);
        return;
}
2683 
2684 static int ia_open(struct atm_vcc *vcc)
2685 {
2686 	struct ia_vcc *ia_vcc;
2687 	int error;
2688 	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2689 	{
2690 		IF_EVENT(printk("ia: not partially allocated resources\n");)
2691 		vcc->dev_data = NULL;
2692 	}
2693 	if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC)
2694 	{
2695 		IF_EVENT(printk("iphase open: unspec part\n");)
2696 		set_bit(ATM_VF_ADDR,&vcc->flags);
2697 	}
2698 	if (vcc->qos.aal != ATM_AAL5)
2699 		return -EINVAL;
2700 	IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2701                                  vcc->dev->number, vcc->vpi, vcc->vci);)
2702 
2703 	/* Device dependent initialization */
2704 	ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2705 	if (!ia_vcc) return -ENOMEM;
2706 	vcc->dev_data = ia_vcc;
2707 
2708 	if ((error = open_rx(vcc)))
2709 	{
2710 		IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2711 		ia_close(vcc);
2712 		return error;
2713 	}
2714 
2715 	if ((error = open_tx(vcc)))
2716 	{
2717 		IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2718 		ia_close(vcc);
2719 		return error;
2720 	}
2721 
2722 	set_bit(ATM_VF_READY,&vcc->flags);
2723 
2724 #if 0
2725         {
2726            static u8 first = 1;
2727            if (first) {
2728               ia_timer.expires = jiffies + 3*HZ;
2729               add_timer(&ia_timer);
2730               first = 0;
2731            }
2732         }
2733 #endif
2734 	IF_EVENT(printk("ia open returning\n");)
2735 	return 0;
2736 }
2737 
/*
 * ia_change_qos - atmdev_ops .change_qos hook.
 * Stub: accepts any QoS change without touching the hardware.
 */
static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
	IF_EVENT(printk(">ia_change_qos\n");)
	return 0;
}
2743 
2744 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2745 {
2746    IA_CMDBUF ia_cmds;
2747    IADEV *iadev;
2748    int i, board;
2749    u16 __user *tmps;
2750    IF_EVENT(printk(">ia_ioctl\n");)
2751    if (cmd != IA_CMD) {
2752       if (!dev->phy->ioctl) return -EINVAL;
2753       return dev->phy->ioctl(dev,cmd,arg);
2754    }
2755    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2756    board = ia_cmds.status;
2757    if ((board < 0) || (board > iadev_count))
2758          board = 0;
2759    iadev = ia_dev[board];
2760    switch (ia_cmds.cmd) {
2761    case MEMDUMP:
2762    {
2763 	switch (ia_cmds.sub_cmd) {
2764        	  case MEMDUMP_DEV:
2765 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2766 	     if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2767                 return -EFAULT;
2768              ia_cmds.status = 0;
2769              break;
2770           case MEMDUMP_SEGREG:
2771 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2772              tmps = (u16 __user *)ia_cmds.buf;
2773              for(i=0; i<0x80; i+=2, tmps++)
2774                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2775              ia_cmds.status = 0;
2776              ia_cmds.len = 0x80;
2777              break;
2778           case MEMDUMP_REASSREG:
2779 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2780              tmps = (u16 __user *)ia_cmds.buf;
2781              for(i=0; i<0x80; i+=2, tmps++)
2782                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2783              ia_cmds.status = 0;
2784              ia_cmds.len = 0x80;
2785              break;
2786           case MEMDUMP_FFL:
2787           {
2788              ia_regs_t       *regs_local;
2789              ffredn_t        *ffL;
2790              rfredn_t        *rfL;
2791 
2792 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2793 	     regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2794 	     if (!regs_local) return -ENOMEM;
2795 	     ffL = &regs_local->ffredn;
2796 	     rfL = &regs_local->rfredn;
2797              /* Copy real rfred registers into the local copy */
2798  	     for (i=0; i<(sizeof (rfredn_t))/4; i++)
2799                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2800              	/* Copy real ffred registers into the local copy */
2801 	     for (i=0; i<(sizeof (ffredn_t))/4; i++)
2802                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2803 
2804              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2805                 kfree(regs_local);
2806                 return -EFAULT;
2807              }
2808              kfree(regs_local);
2809              printk("Board %d registers dumped\n", board);
2810              ia_cmds.status = 0;
2811 	 }
2812     	     break;
2813          case READ_REG:
2814          {
2815 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2816              desc_dbg(iadev);
2817              ia_cmds.status = 0;
2818          }
2819              break;
2820          case 0x6:
2821          {
2822              ia_cmds.status = 0;
2823              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2824              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2825          }
2826              break;
2827          case 0x8:
2828          {
2829              struct k_sonet_stats *stats;
2830              stats = &PRIV(_ia_dev[board])->sonet_stats;
2831              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2832              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2833              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2834              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2835              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2836              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2837              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2838              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2839              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2840          }
2841             ia_cmds.status = 0;
2842             break;
2843          case 0x9:
2844 	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2845             for (i = 1; i <= iadev->num_rx_desc; i++)
2846                free_desc(_ia_dev[board], i);
2847             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2848                                             iadev->reass_reg+REASS_MASK_REG);
2849             iadev->rxing = 1;
2850 
2851             ia_cmds.status = 0;
2852             break;
2853 
2854          case 0xb:
2855 	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2856             ia_frontend_intr(iadev);
2857             break;
2858          case 0xa:
2859 	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2860          {
2861              ia_cmds.status = 0;
2862              IADebugFlag = ia_cmds.maddr;
2863              printk("New debug option loaded\n");
2864          }
2865              break;
2866          default:
2867              ia_cmds.status = 0;
2868              break;
2869       }
2870    }
2871       break;
2872    default:
2873       break;
2874 
2875    }
2876    return 0;
2877 }
2878 
/*
 * ia_getsockopt - atmdev_ops .getsockopt hook.
 * Stub: no device-level socket options are supported.
 */
static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
	void __user *optval, int optlen)
{
	IF_EVENT(printk(">ia_getsockopt\n");)
	return -EINVAL;
}
2885 
/*
 * ia_setsockopt - atmdev_ops .setsockopt hook.
 * Stub: no device-level socket options are supported.
 */
static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
	void __user *optval, unsigned int optlen)
{
	IF_EVENT(printk(">ia_setsockopt\n");)
	return -EINVAL;
}
2892 
/*
 * ia_pkt_tx - hand one AAL5 packet to the segmentation engine.
 *
 * Called under iadev->tx_lock (see ia_send).  Grabs a free transmit
 * descriptor from the TCQ, queues the descriptor number in the packet
 * ready queue, builds the buffer descriptor in segment RAM, and chains
 * two DLEs for the DMA engine: one for the payload and one for the
 * CPCS trailer.
 *
 * Returns 0 when the skb was accepted (or deliberately dropped) and
 * 1 when no descriptor was available, in which case the caller should
 * back-log the skb and retry later.
 */
static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
        IADEV *iadev;
        struct dle *wr_ptr;
        struct tx_buf_desc __iomem *buf_desc_ptr;
        int desc;
        int comp_code;
        int total_len;
        struct cpcs_trailer *trailer;
        struct ia_vcc *iavcc;

        iadev = INPH_IA_DEV(vcc->dev);
        iavcc = INPH_IA_VCC(vcc);
        /* VC already closed for transmit - drop silently (counted as ok). */
        if (!iavcc->txing) {
           printk("discard packet on closed VC\n");
           if (vcc->pop)
		vcc->pop(vcc, skb);
           else
		dev_kfree_skb_any(skb);
	   return 0;
        }

        /* Packet (plus 8 bytes of headroom) must fit one tx buffer. */
        if (skb->len > iadev->tx_buf_sz - 8) {
           printk("Transmit size over tx buffer size\n");
           if (vcc->pop)
                 vcc->pop(vcc, skb);
           else
                 dev_kfree_skb_any(skb);
          return 0;
        }
        /* The DMA engine needs 32-bit aligned payload. */
        if ((unsigned long)skb->data & 3) {
           printk("Misaligned SKB\n");
           if (vcc->pop)
                 vcc->pop(vcc, skb);
           else
                 dev_kfree_skb_any(skb);
           return 0;
        }
	/* Get a descriptor number from our free descriptor queue
	   We get the descr number from the TCQ now, since I am using
	   the TCQ as a free buffer queue. Initially TCQ will be
	   initialized with all the descriptors and is hence, full.
	*/
	desc = get_desc (iadev, iavcc);
	if (desc == 0xffff)
	    return 1;
	/* Top 3 bits carry a completion code, the rest the descriptor. */
	comp_code = desc >> 13;
	desc &= 0x1fff;

	if ((desc == 0) || (desc > iadev->num_tx_desc))
	{
		IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
                /* NOTE(review): a dropped packet is counted in stats->tx
                 * here; stats->tx_err looks more appropriate - confirm
                 * before changing. */
                atomic_inc(&vcc->stats->tx);
		if (vcc->pop)
		    vcc->pop(vcc, skb);
		else
		    dev_kfree_skb_any(skb);
		return 0;   /* return SUCCESS */
	}

	if (comp_code)
	{
	    IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
                                                            desc, comp_code);)
	}

        /* remember the desc and vcc mapping */
        iavcc->vc_desc_cnt++;
        iadev->desc_tbl[desc-1].iavcc = iavcc;
        iadev->desc_tbl[desc-1].txskb = skb;
        IA_SKB_STATE(skb) = 0;

        /* Advance the TCQ read pointer (wraps at tcq_ed back to tcq_st). */
        iadev->ffL.tcq_rd += 2;
        if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
	  	iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
	writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);

	/* Put the descriptor number in the packet ready queue
		and put the updated write pointer in the DLE field
	*/
	*(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;

 	iadev->ffL.prq_wr += 2;
        if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
                iadev->ffL.prq_wr = iadev->ffL.prq_st;

	/* Figure out the exact length of the packet and padding required to
           make it  aligned on a 48 byte boundary.  */
	total_len = skb->len + sizeof(struct cpcs_trailer);
	total_len = ((total_len + 47) / 48) * 48;
	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)

	/* Put the packet in a tx buffer */
	trailer = iadev->tx_buf[desc-1].cpcs;
        IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n",
                  skb, skb->data, skb->len, desc);)
	trailer->control = 0;
        /*big endian*/
	trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
	trailer->crc32 = 0;	/* not needed - dummy bytes */

	/* Display the packet */
	IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
                                                        skb->len, tcnter++);
        xdump(skb->data, skb->len, "TX: ");
        printk("\n");)

	/* Build the buffer descriptor */
	buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
	buf_desc_ptr += desc;	/* points to the corresponding entry */
	buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
	/* Huh ? p.115 of users guide describes this as a read-only register */
        writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
	buf_desc_ptr->vc_index = vcc->vci;
	buf_desc_ptr->bytes = total_len;

        if (vcc->qos.txtp.traffic_class == ATM_ABR)
	   clear_lockup (vcc, iadev);

	/* Build the DLE structure */
	wr_ptr = iadev->tx_dle_q.write;
	memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
	wr_ptr->sys_pkt_addr = dma_map_single(&iadev->pci->dev, skb->data,
					      skb->len, DMA_TO_DEVICE);
	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
                                                  buf_desc_ptr->buf_start_lo;
	/* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? */
	wr_ptr->bytes = skb->len;

        /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */
        if ((wr_ptr->bytes >> 2) == 0xb)
           wr_ptr->bytes = 0x30;

	wr_ptr->mode = TX_DLE_PSI;
	wr_ptr->prq_wr_ptr_data = 0;

	/* end is not to be used for the DLE q */
	if (++wr_ptr == iadev->tx_dle_q.end)
		wr_ptr = iadev->tx_dle_q.start;

        /* Build trailer dle */
        wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
        wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
          buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);

        wr_ptr->bytes = sizeof(struct cpcs_trailer);
        wr_ptr->mode = DMA_INT_ENABLE;
        wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;

        /* end is not to be used for the DLE q */
        if (++wr_ptr == iadev->tx_dle_q.end)
                wr_ptr = iadev->tx_dle_q.start;

	iadev->tx_dle_q.write = wr_ptr;
        ATM_DESC(skb) = vcc->vci;
        skb_queue_tail(&iadev->tx_dma_q, skb);

        atomic_inc(&vcc->stats->tx);
        iadev->tx_pkt_cnt++;
	/* Increment transaction counter */
	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);

	IF_TX(printk("ia send done\n");)
	return 0;
}
3072 
3073 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3074 {
3075         IADEV *iadev;
3076         unsigned long flags;
3077 
3078         iadev = INPH_IA_DEV(vcc->dev);
3079         if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer))))
3080         {
3081             if (!skb)
3082                 printk(KERN_CRIT "null skb in ia_send\n");
3083             else dev_kfree_skb_any(skb);
3084             return -EINVAL;
3085         }
3086         spin_lock_irqsave(&iadev->tx_lock, flags);
3087         if (!test_bit(ATM_VF_READY,&vcc->flags)){
3088             dev_kfree_skb_any(skb);
3089             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3090             return -EINVAL;
3091         }
3092         ATM_SKB(skb)->vcc = vcc;
3093 
3094         if (skb_peek(&iadev->tx_backlog)) {
3095            skb_queue_tail(&iadev->tx_backlog, skb);
3096         }
3097         else {
3098            if (ia_pkt_tx (vcc, skb)) {
3099               skb_queue_tail(&iadev->tx_backlog, skb);
3100            }
3101         }
3102         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3103         return 0;
3104 
3105 }
3106 
/*
 * ia_proc_read - atmdev_ops .proc_read hook.
 *
 * Emits one logical line per call, selected by *pos: position 0 is the
 * board-type summary, position 1 the buffer/statistics block.  Returns
 * the number of bytes written into "page", or 0 when *pos is past the
 * last line.
 *
 * NOTE(review): the output strings (including the historical
 * "Receiverd" typo) are userspace-visible and intentionally left
 * byte-identical.
 */
static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
{
  int   left = *pos, n;
  char  *tmpPtr;
  IADEV *iadev = INPH_IA_DEV(dev);
  if(!left--) {
     if (iadev->phy_type == FE_25MBIT_PHY) {
       n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
       return n;
     }
     /* Board name depends on the probed front-end type... */
     if (iadev->phy_type == FE_DS3_PHY)
        n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
     else if (iadev->phy_type == FE_E3_PHY)
        n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
     else if (iadev->phy_type == FE_UTP_OPTION)
         n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155");
     else
        n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
     /* ...followed by the VC-table size... */
     tmpPtr = page + n;
     if (iadev->pci_map_size == 0x40000)
        n += sprintf(tmpPtr, "-1KVC-");
     else
        n += sprintf(tmpPtr, "-4KVC-");
     /* ...and the packet-memory size. */
     tmpPtr = page + n;
     if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
        n += sprintf(tmpPtr, "1M  \n");
     else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
        n += sprintf(tmpPtr, "512K\n");
     else
       n += sprintf(tmpPtr, "128K\n");
     return n;
  }
  if (!left) {
     return  sprintf(page, "  Number of Tx Buffer:  %u\n"
                           "  Size of Tx Buffer  :  %u\n"
                           "  Number of Rx Buffer:  %u\n"
                           "  Size of Rx Buffer  :  %u\n"
                           "  Packets Receiverd  :  %u\n"
                           "  Packets Transmitted:  %u\n"
                           "  Cells Received     :  %u\n"
                           "  Cells Transmitted  :  %u\n"
                           "  Board Dropped Cells:  %u\n"
                           "  Board Dropped Pkts :  %u\n",
                           iadev->num_tx_desc,  iadev->tx_buf_sz,
                           iadev->num_rx_desc,  iadev->rx_buf_sz,
                           iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
                           iadev->rx_cell_cnt, iadev->tx_cell_cnt,
                           iadev->drop_rxcell, iadev->drop_rxpkt);
  }
  return 0;
}
3158 
/* Device operations exported to the ATM core via atm_dev_register(). */
static const struct atmdev_ops ops = {
	.open		= ia_open,
	.close		= ia_close,
	.ioctl		= ia_ioctl,
	.getsockopt	= ia_getsockopt,
	.setsockopt	= ia_setsockopt,
	.send		= ia_send,
	.phy_put	= ia_phy_put,
	.phy_get	= ia_phy_get,
	.change_qos	= ia_change_qos,
	.proc_read	= ia_proc_read,
	.owner		= THIS_MODULE,
};
3172 
/*
 * ia_init_one - PCI probe callback.
 *
 * Allocates the device-private IADEV, enables the PCI device, registers
 * the ATM device with the core, records the board in the module-global
 * ia_dev[]/_ia_dev[] tables and then runs ia_init()/ia_start() to bring
 * the hardware up.  On success the device is also linked onto the
 * ia_boards list.
 *
 * Returns 0 on success or a negative errno, unwinding via the goto
 * chain on failure.
 */
static int ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct atm_dev *dev;
	IADEV *iadev;
	int ret;

	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
	if (!iadev) {
		ret = -ENOMEM;
		goto err_out;
	}

	iadev->pci = pdev;

	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
		pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
	if (pci_enable_device(pdev)) {
		ret = -ENODEV;
		goto err_out_free_iadev;
	}
	dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_out_disable_dev;
	}
	dev->dev_data = iadev;
	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
	IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev,
		iadev->LineRate);)

	pci_set_drvdata(pdev, dev);

	/* Publish the board in the module-global tables before init, so
	 * debug ioctls can address it by index. */
	ia_dev[iadev_count] = iadev;
	_ia_dev[iadev_count] = dev;
	iadev_count++;
	if (ia_init(dev) || ia_start(dev)) {
		IF_INIT(printk("IA register failed!\n");)
		/* Roll the global tables back on failure. */
		iadev_count--;
		ia_dev[iadev_count] = NULL;
		_ia_dev[iadev_count] = NULL;
		ret = -EINVAL;
		goto err_out_deregister_dev;
		/* NOTE(review): whatever ia_init() mapped (iadev->base is
		 * iounmap'ed in ia_remove_one) does not appear to be
		 * unmapped on this path - confirm against ia_init(). */
	}
	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)

	iadev->next_board = ia_boards;
	ia_boards = dev;

	return 0;

err_out_deregister_dev:
	atm_dev_deregister(dev);
err_out_disable_dev:
	pci_disable_device(pdev);
err_out_free_iadev:
	kfree(iadev);
err_out:
	return ret;
}
3232 
/*
 * ia_remove_one - PCI remove callback.
 *
 * Quiesces the PHY (masks its interrupts and stops it), releases the
 * IRQ, removes the board from the module-global tables, deregisters the
 * ATM device and finally frees the register mapping, DMA resources and
 * the private structure.
 */
static void ia_remove_one(struct pci_dev *pdev)
{
	struct atm_dev *dev = pci_get_drvdata(pdev);
	IADEV *iadev = INPH_IA_DEV(dev);

	/* Disable phy interrupts */
	ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE),
				   SUNI_RSOP_CIE);
	udelay(1);

	if (dev->phy && dev->phy->stop)
		dev->phy->stop(dev);

	/* De-register device */
      	free_irq(iadev->irq, dev);
	iadev_count--;
	ia_dev[iadev_count] = NULL;
	_ia_dev[iadev_count] = NULL;
	IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);)
	atm_dev_deregister(dev);

      	iounmap(iadev->base);
	pci_disable_device(pdev);

	ia_free_rx(iadev);
	ia_free_tx(iadev);

      	kfree(iadev);
}
3262 
3263 static struct pci_device_id ia_pci_tbl[] = {
3264 	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3265 	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3266 	{ 0,}
3267 };
3268 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3269 
/* PCI driver glue: binds probe/remove to the device-ID table above. */
static struct pci_driver ia_driver = {
	.name =         DEV_LABEL,
	.id_table =     ia_pci_tbl,
	.probe =        ia_init_one,
	.remove =       ia_remove_one,
};
3276 
3277 static int __init ia_module_init(void)
3278 {
3279 	int ret;
3280 
3281 	ret = pci_register_driver(&ia_driver);
3282 	if (ret >= 0) {
3283 		ia_timer.expires = jiffies + 3*HZ;
3284 		add_timer(&ia_timer);
3285 	} else
3286 		printk(KERN_ERR DEV_LABEL ": no adapter found\n");
3287 	return ret;
3288 }
3289 
/*
 * Module exit point: unregister the PCI driver (which removes all
 * boards) and stop the housekeeping timer.
 *
 * NOTE(review): del_timer() does not wait for a concurrently-running
 * handler; del_timer_sync() would - confirm whether the handler can
 * still be in flight here.
 */
static void __exit ia_module_exit(void)
{
	pci_unregister_driver(&ia_driver);

        del_timer(&ia_timer);
}
3296 
/* Module entry/exit registration. */
module_init(ia_module_init);
module_exit(ia_module_exit);
3299