1 /******************************************************************************
2          iphase.c: Device driver for Interphase ATM PCI adapter cards
3                     Author: Peter Wang  <pwang@iphase.com>
4 		   Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br>
5                    Interphase Corporation  <www.iphase.com>
6                                Version: 1.0
7 *******************************************************************************
8 
9       This software may be used and distributed according to the terms
10       of the GNU General Public License (GPL), incorporated herein by reference.
11       Drivers based on this skeleton fall under the GPL and must retain
12       the authorship (implicit copyright) notice.
13 
14       This program is distributed in the hope that it will be useful, but
15       WITHOUT ANY WARRANTY; without even the implied warranty of
16       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17       General Public License for more details.
18 
19       Modified from an incomplete driver for the Interphase 5575 1KVC 1M card,
20       which was originally written by Monalisa Agrawal at UNH. This driver now
21       supports a variety of variants of the Interphase ATM PCI (i)Chip adapter
22       card family (see www.iphase.com/products/ClassSheet.cfm?ClassID=ATM),
23       differing in PHY type, control memory size and packet memory size.
24       The following is the change log and history:
25 
26           Bugfix Mona's UBR driver.
27           Modify the basic memory allocation and DMA logic.
28           Port the driver to the latest kernel from 2.0.46.
29           Complete the ABR logic of the driver, and add the ABR work-
30               around for the hardware anomalies.
31           Add the CBR support.
32           Add the flow control logic to the driver to allow rate-limited VCs.
33           Add 4K VC support to the board with 512K control memory.
34           Add support for all the variants of the Interphase ATM PCI
35           (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525
36           (25M UTP25) and x531 (DS3 and E3).
37           Add SMP support.
38 
39       Support and updates available at: ftp://ftp.iphase.com/pub/atm
40 
41 *******************************************************************************/
42 
43 #include <linux/module.h>
44 #include <linux/kernel.h>
45 #include <linux/mm.h>
46 #include <linux/pci.h>
47 #include <linux/errno.h>
48 #include <linux/atm.h>
49 #include <linux/atmdev.h>
50 #include <linux/sonet.h>
51 #include <linux/skbuff.h>
52 #include <linux/time.h>
53 #include <linux/delay.h>
54 #include <linux/uio.h>
55 #include <linux/init.h>
56 #include <linux/wait.h>
57 #include <asm/system.h>
58 #include <asm/io.h>
59 #include <asm/atomic.h>
60 #include <asm/uaccess.h>
61 #include <asm/string.h>
62 #include <asm/byteorder.h>
63 #include <linux/vmalloc.h>
64 #include "iphase.h"
65 #include "suni.h"
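/* Byte-swap a 16-bit value; used below on the AAL5 CPCS trailer length
   read out of the DMAed receive buffer. */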
66 #define swap(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
67 struct suni_priv {
68         struct k_sonet_stats sonet_stats; /* link diagnostics */
69         unsigned char loop_mode;        /* loopback mode */
70         struct atm_dev *dev;            /* device back-pointer */
71         struct suni_priv *next;         /* next SUNI */
72 };
73 #define PRIV(dev) ((struct suni_priv *) dev->phy_data)
74 
75 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr);
76 static void desc_dbg(IADEV *iadev);
77 
78 static IADEV *ia_dev[8];
79 static struct atm_dev *_ia_dev[8];
80 static int iadev_count;
81 static void ia_led_timer(unsigned long arg);
82 static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0);
83 static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
84 static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
85 static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER
86             |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0;
87 
88 module_param(IA_TX_BUF, int, 0);
89 module_param(IA_TX_BUF_SZ, int, 0);
90 module_param(IA_RX_BUF, int, 0);
91 module_param(IA_RX_BUF_SZ, int, 0);
92 module_param(IADebugFlag, uint, 0644);
93 
94 MODULE_LICENSE("GPL");
95 
96 #if BITS_PER_LONG != 32
97 #  error FIXME: this driver only works on 32-bit platforms
98 #endif
99 
100 /**************************** IA_LIB **********************************/
101 
102 static void ia_init_rtn_q (IARTN_Q *que)
103 {
104    que->next = NULL;
105    que->tail = NULL;
106 }
107 
108 static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data)
109 {
110    data->next = NULL;
111    if (que->next == NULL)
112       que->next = que->tail = data;
113    else {
114       data->next = que->next;
115       que->next = data;
116    }
117    return;
118 }
119 
120 static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) {
121    IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
122    if (!entry) return -1;
123    entry->data = data;
124    entry->next = NULL;
125    if (que->next == NULL)
126       que->next = que->tail = entry;
127    else {
128       que->tail->next = entry;
129       que->tail = que->tail->next;
130    }
131    return 1;
132 }
133 
134 static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) {
135    IARTN_Q *tmpdata;
136    if (que->next == NULL)
137       return NULL;
138    tmpdata = que->next;
139    if ( que->next == que->tail)
140       que->next = que->tail = NULL;
141    else
142       que->next = que->next->next;
143    return tmpdata;
144 }
145 
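/*
 * ia_hack_tcq: walk the Transmit Completion Queue from our shadow write
 * pointer (host_tcq_wr) up to the hardware write pointer, releasing the
 * transmit descriptors the hardware has finished with.  For VCs running
 * below dev->rate_limit the completed entry is also queued on tx_return_q
 * so that ia_tx_poll() can hand the skb back to its owner.
 */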
146 static void ia_hack_tcq(IADEV *dev) {
147 
148   u_short 		desc1;
149   u_short		tcq_wr;
150   struct ia_vcc         *iavcc_r = NULL;
151 
152   tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff;
153   while (dev->host_tcq_wr != tcq_wr) {
154      desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr);
155      if (!desc1) ;
156      else if (!dev->desc_tbl[desc1 -1].timestamp) {
157         IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);)
158         *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0;
159      }
160      else if (dev->desc_tbl[desc1 -1].timestamp) {
161         if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) {
162            printk("IA: Fatal err in get_desc\n");
163            continue;
164         }
165         iavcc_r->vc_desc_cnt--;
166         dev->desc_tbl[desc1 -1].timestamp = 0;
167         IF_EVENT(printk("ia_hack: return_q skb = 0x%x desc = %d\n",
168                                    (u32)dev->desc_tbl[desc1 -1].txskb, desc1);)
169         if (iavcc_r->pcr < dev->rate_limit) {
170            IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE;
171            if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0)
172               printk("ia_hack_tcq: No memory available\n");
173         }
174         dev->desc_tbl[desc1 -1].iavcc = NULL;
175         dev->desc_tbl[desc1 -1].txskb = NULL;
176      }
177      dev->host_tcq_wr += 2;
178      if (dev->host_tcq_wr > dev->ffL.tcq_ed)
179         dev->host_tcq_wr = dev->ffL.tcq_st;
180   }
181 } /* ia_hack_tcq */
182 
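/*
 * get_desc: return the next free transmit descriptor number (1-based) from
 * the TCQ, or 0xFFFF if none is available.  Descriptors whose owning VC has
 * exceeded its ltimeout are reclaimed and pushed back into the TCQ first.
 */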
183 static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) {
184   u_short 		desc_num, i;
185   struct sk_buff        *skb;
186   struct ia_vcc         *iavcc_r = NULL;
187   unsigned long delta;
188   static unsigned long timer = 0;
189   int ltimeout;
190 
191   ia_hack_tcq (dev);
192   if(((jiffies - timer)>50)||((dev->ffL.tcq_rd==dev->host_tcq_wr))){
193      timer = jiffies;
194      i=0;
195      while (i < dev->num_tx_desc) {
196         if (!dev->desc_tbl[i].timestamp) {
197            i++;
198            continue;
199         }
200         ltimeout = dev->desc_tbl[i].iavcc->ltimeout;
201         delta = jiffies - dev->desc_tbl[i].timestamp;
202         if (delta >= ltimeout) {
203            IF_ABR(printk("RECOVER run!! desc_tbl %d = %d  delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);)
204            if (dev->ffL.tcq_rd == dev->ffL.tcq_st)
205               dev->ffL.tcq_rd =  dev->ffL.tcq_ed;
206            else
207               dev->ffL.tcq_rd -= 2;
208            *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1;
209            if (!(skb = dev->desc_tbl[i].txskb) ||
210                           !(iavcc_r = dev->desc_tbl[i].iavcc))
211               printk("Fatal err, desc table vcc or skb is NULL\n");
212            else
213               iavcc_r->vc_desc_cnt--;
214            dev->desc_tbl[i].timestamp = 0;
215            dev->desc_tbl[i].iavcc = NULL;
216            dev->desc_tbl[i].txskb = NULL;
217         }
218         i++;
219      } /* while */
220   }
221   if (dev->ffL.tcq_rd == dev->host_tcq_wr)
222      return 0xFFFF;
223 
224   /* Get the next available descriptor number from TCQ */
225   desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
226 
227   while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) {
228      dev->ffL.tcq_rd += 2;
229      if (dev->ffL.tcq_rd > dev->ffL.tcq_ed)
230      dev->ffL.tcq_rd = dev->ffL.tcq_st;
231      if (dev->ffL.tcq_rd == dev->host_tcq_wr)
232         return 0xFFFF;
233      desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd);
234   }
235 
236   /* get system time */
237   dev->desc_tbl[desc_num -1].timestamp = jiffies;
238   return desc_num;
239 }
240 
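/*
 * clear_lockup: work around the ABR transmit lockup anomaly mentioned in the
 * header comment.  If an ABR VC is seen stuck in the same scheduler state
 * (or with an unchanged cell slot/fraction) across several polls, force the
 * VC back to the idle state and re-insert its VCI into the ABR schedule
 * table so segmentation can resume.
 */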
241 static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) {
242   u_char          	foundLockUp;
243   vcstatus_t		*vcstatus;
244   u_short               *shd_tbl;
245   u_short               tempCellSlot, tempFract;
246   struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR;
247   struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR;
248   u_int  i;
249 
250   if (vcc->qos.txtp.traffic_class == ATM_ABR) {
251      vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status);
252      vcstatus->cnt++;
253      foundLockUp = 0;
254      if( vcstatus->cnt == 0x05 ) {
255         abr_vc += vcc->vci;
256 	eabr_vc += vcc->vci;
257 	if( eabr_vc->last_desc ) {
258 	   if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) {
259               /* Wait for 10 Micro sec */
260               udelay(10);
261 	      if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE))
262 		 foundLockUp = 1;
263            }
264 	   else {
265 	      tempCellSlot = abr_vc->last_cell_slot;
266               tempFract    = abr_vc->fraction;
267               if((tempCellSlot == dev->testTable[vcc->vci]->lastTime)
268                          && (tempFract == dev->testTable[vcc->vci]->fract))
269 	         foundLockUp = 1;
270               dev->testTable[vcc->vci]->lastTime = tempCellSlot;
271               dev->testTable[vcc->vci]->fract = tempFract;
272 	   }
273         } /* last descriptor */
274         vcstatus->cnt = 0;
275      } /* vcstatus->cnt */
276 
277      if (foundLockUp) {
278         IF_ABR(printk("LOCK UP found\n");)
279 	writew(0xFFFD, dev->seg_reg+MODE_REG_0);
280         /* Wait for 10 Micro sec */
281         udelay(10);
282         abr_vc->status &= 0xFFF8;
283         abr_vc->status |= 0x0001;  /* state is idle */
284 	shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR;
285 	for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ );
286 	if (i < dev->num_vc)
287            shd_tbl[i] = vcc->vci;
288         else
289            IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);)
290         writew(T_ONLINE, dev->seg_reg+MODE_REG_0);
291         writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG);
292         writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG);
293 	vcstatus->cnt = 0;
294      } /* foundLockUp */
295 
296   } /* if an ABR VC */
297 
298 
299 }
300 
301 /*
302 ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format.
303 **
304 **  +----+----+------------------+-------------------------------+
305 **  |  R | NZ |  5-bit exponent  |        9-bit mantissa         |
306 **  +----+----+------------------+-------------------------------+
307 **
308 **    R = reserved (written as 0)
309 **    NZ = 0 if 0 cells/sec; 1 otherwise
310 **
311 **    if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec
312 */
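/*
**  Example: 1000 cells/sec has floor(log2(1000)) = 9, so the exponent is 9
**  and the mantissa is the low 9 bits of 1000 (0x1e8); the encoding is
**  NZ | (9 << 9) | 0x1e8 = 0x53e8, i.e. 1.111101000 x 2^9 = exactly
**  1000 cells/sec.
*/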
313 static u16
314 cellrate_to_float(u32 cr)
315 {
316 
317 #define	NZ 		0x4000
318 #define	M_BITS		9		/* Number of bits in mantissa */
319 #define	E_BITS		5		/* Number of bits in exponent */
320 #define	M_MASK		0x1ff
321 #define	E_MASK		0x1f
322   u16   flot;
323   u32	tmp = cr & 0x00ffffff;
324   int 	i   = 0;
325   if (cr == 0)
326      return 0;
327   while (tmp != 1) {
328      tmp >>= 1;
329      i++;
330   }
331   if (i == M_BITS)
332      flot = NZ | (i << M_BITS) | (cr & M_MASK);
333   else if (i < M_BITS)
334      flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK);
335   else
336      flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK);
337   return flot;
338 }
339 
340 #if 0
341 /*
342 ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec).
343 */
344 static u32
345 float_to_cellrate(u16 rate)
346 {
347   u32   exp, mantissa, cps;
348   if ((rate & NZ) == 0)
349      return 0;
350   exp = (rate >> M_BITS) & E_MASK;
351   mantissa = rate & M_MASK;
352   if (exp == 0)
353      return 1;
354   cps = (1 << M_BITS) | mantissa;
355   if (exp == M_BITS)
356      ;	/* mantissa already aligned; cps is the cell rate */
357   else if (exp > M_BITS)
358      cps <<= (exp - M_BITS);
359   else
360      cps >>= (M_BITS - exp);
361   return cps;
362 }
363 #endif
364 
365 static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) {
366   srv_p->class_type = ATM_ABR;
367   srv_p->pcr        = dev->LineRate;
368   srv_p->mcr        = 0;
369   srv_p->icr        = 0x055cb7;
370   srv_p->tbe        = 0xffffff;
371   srv_p->frtt       = 0x3a;
372   srv_p->rif        = 0xf;
373   srv_p->rdf        = 0xb;
374   srv_p->nrm        = 0x4;
375   srv_p->trm        = 0x7;
376   srv_p->cdf        = 0x3;
377   srv_p->adtf       = 50;
378 }
379 
380 static int
381 ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p,
382                                                 struct atm_vcc *vcc, u8 flag)
383 {
384   f_vc_abr_entry  *f_abr_vc;
385   r_vc_abr_entry  *r_abr_vc;
386   u32		icr;
387   u8		trm, nrm, crm;
388   u16		adtf, air, *ptr16;
389   f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR;
390   f_abr_vc += vcc->vci;
391   switch (flag) {
392      case 1: /* FFRED initialization */
393 #if 0  /* sanity check */
394        if (srv_p->pcr == 0)
395           return INVALID_PCR;
396        if (srv_p->pcr > dev->LineRate)
397           srv_p->pcr = dev->LineRate;
398        if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate)
399 	  return MCR_UNAVAILABLE;
400        if (srv_p->mcr > srv_p->pcr)
401 	  return INVALID_MCR;
402        if (!(srv_p->icr))
403 	  srv_p->icr = srv_p->pcr;
404        if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr))
405 	  return INVALID_ICR;
406        if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE))
407 	  return INVALID_TBE;
408        if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT))
409 	  return INVALID_FRTT;
410        if (srv_p->nrm > MAX_NRM)
411 	  return INVALID_NRM;
412        if (srv_p->trm > MAX_TRM)
413 	  return INVALID_TRM;
414        if (srv_p->adtf > MAX_ADTF)
415           return INVALID_ADTF;
416        else if (srv_p->adtf == 0)
417 	  srv_p->adtf = 1;
418        if (srv_p->cdf > MAX_CDF)
419 	  return INVALID_CDF;
420        if (srv_p->rif > MAX_RIF)
421 	  return INVALID_RIF;
422        if (srv_p->rdf > MAX_RDF)
423 	  return INVALID_RDF;
424 #endif
425        memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc));
426        f_abr_vc->f_vc_type = ABR;
427        nrm = 2 << srv_p->nrm;     /* (2 ** (srv_p->nrm +1)) */
428 			          /* i.e 2**n = 2 << (n-1) */
429        f_abr_vc->f_nrm = nrm << 8 | nrm;
430        trm = 100000/(2 << (16 - srv_p->trm));
431        if ( trm == 0) trm = 1;
432        f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm;
433        crm = srv_p->tbe / nrm;
434        if (crm == 0) crm = 1;
435        f_abr_vc->f_crm = crm & 0xff;
436        f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr);
437        icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ?
438 				((srv_p->tbe/srv_p->frtt)*1000000) :
439 				(1000000/(srv_p->frtt/srv_p->tbe)));
440        f_abr_vc->f_icr = cellrate_to_float(icr);
441        adtf = (10000 * srv_p->adtf)/8192;
442        if (adtf == 0) adtf = 1;
443        f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff;
444        f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr);
445        f_abr_vc->f_acr = f_abr_vc->f_icr;
446        f_abr_vc->f_status = 0x0042;
447        break;
448     case 0: /* RFRED initialization */
449        ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize);
450        *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR;
451        r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize);
452        r_abr_vc += vcc->vci;
453        r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f;
454        air = srv_p->pcr << (15 - srv_p->rif);
455        if (air == 0) air = 1;
456        r_abr_vc->r_air = cellrate_to_float(air);
457        dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR;
458        dev->sum_mcr	   += srv_p->mcr;
459        dev->n_abr++;
460        break;
461     default:
462        break;
463   }
464   return	0;
465 }
466 static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) {
467    u32 rateLow=0, rateHigh, rate;
468    int entries;
469    struct ia_vcc *ia_vcc;
470 
471    int   idealSlot =0, testSlot, toBeAssigned, inc;
472    u32   spacing;
473    u16  *SchedTbl, *TstSchedTbl;
474    u16  cbrVC, vcIndex;
475    u32   fracSlot    = 0;
476    u32   sp_mod      = 0;
477    u32   sp_mod2     = 0;
478 
479    /* IpAdjustTrafficParams */
480    if (vcc->qos.txtp.max_pcr <= 0) {
481       IF_ERR(printk("PCR for CBR not defined\n");)
482       return -1;
483    }
484    rate = vcc->qos.txtp.max_pcr;
485    entries = rate / dev->Granularity;
486    IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n",
487                                 entries, rate, dev->Granularity);)
488    if (entries < 1)
489       IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");)
490    rateLow  =  entries * dev->Granularity;
491    rateHigh = (entries + 1) * dev->Granularity;
492    if (3*(rate - rateLow) > (rateHigh - rate))
493       entries++;
494    if (entries > dev->CbrRemEntries) {
495       IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");)
496       IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n",
497                                        entries, dev->CbrRemEntries);)
498       return -EBUSY;
499    }
500 
501    ia_vcc = INPH_IA_VCC(vcc);
502    ia_vcc->NumCbrEntry = entries;
503    dev->sum_mcr += entries * dev->Granularity;
504    /* IaFFrednInsertCbrSched */
505    // Starting at an arbitrary location, place the entries into the table
506    // as smoothly as possible
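   // For example, with CbrTotEntries = 100 and entries = 8, spacing = 12 and
   // sp_mod = 4; the fracSlot/sp_mod2 arithmetic below then spreads the
   // remainder by mixing steps of 12 and 13 slots, so the 8 entries average
   // one every 12.5 slots.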
507    cbrVC   = 0;
508    spacing = dev->CbrTotEntries / entries;
509    sp_mod  = dev->CbrTotEntries % entries; // get modulo
510    toBeAssigned = entries;
511    fracSlot = 0;
512    vcIndex  = vcc->vci;
513    IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);)
514    while (toBeAssigned)
515    {
516       // If this is the first time, start the table loading for this connection
517       // as close to entryPoint as possible.
518       if (toBeAssigned == entries)
519       {
520          idealSlot = dev->CbrEntryPt;
521          dev->CbrEntryPt += 2;    // Adding 2 helps to prevent clumping
522          if (dev->CbrEntryPt >= dev->CbrTotEntries)
523             dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary
524       } else {
525          idealSlot += (u32)(spacing + fracSlot); // Point to the next location
526          // in the table that would be  smoothest
527          fracSlot = ((sp_mod + sp_mod2) / entries);  // get new integer part
528          sp_mod2  = ((sp_mod + sp_mod2) % entries);  // calc new fractional part
529       }
530       if (idealSlot >= (int)dev->CbrTotEntries)
531          idealSlot -= dev->CbrTotEntries;
532       // Continuously check around this ideal value until a null
533       // location is encountered.
534       SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize);
535       inc = 0;
536       testSlot = idealSlot;
537       TstSchedTbl = (u16*)(SchedTbl+testSlot);  //set index and read in value
538       IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%x, NumToAssign=%d\n",
539                                 testSlot, (u32)TstSchedTbl,toBeAssigned);)
540       memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
541       while (cbrVC)  // If another VC at this location, we have to keep looking
542       {
543           inc++;
544           testSlot = idealSlot - inc;
545           if (testSlot < 0) { // Wrap if necessary
546              testSlot += dev->CbrTotEntries;
547              IF_CBR(printk("Testslot Wrap. STable Start=0x%x,Testslot=%d\n",
548                                                        (u32)SchedTbl,testSlot);)
549           }
550           TstSchedTbl = (u16 *)(SchedTbl + testSlot);  // set table index
551           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
552           if (!cbrVC)
553              break;
554           testSlot = idealSlot + inc;
555           if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary
556              testSlot -= dev->CbrTotEntries;
557              IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);)
558              IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n",
559                                             testSlot, toBeAssigned);)
560           }
561           // set table index and read in value
562           TstSchedTbl = (u16*)(SchedTbl + testSlot);
563           IF_CBR(printk("Reading CBR Tbl from 0x%x, CbrVal=0x%x Iteration %d\n",
564                           (u32)TstSchedTbl,cbrVC,inc);)
565           memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC));
566        } /* while */
567        // Move this VCI number into this location of the CBR Sched table.
568        memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(vcIndex));
569        dev->CbrRemEntries--;
570        toBeAssigned--;
571    } /* while */
572 
573    /* IaFFrednCbrEnable */
574    dev->NumEnabledCBR++;
575    if (dev->NumEnabledCBR == 1) {
576        writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS);
577        IF_CBR(printk("CBR is enabled\n");)
578    }
579    return 0;
580 }
581 static void ia_cbrVc_close (struct atm_vcc *vcc) {
582    IADEV *iadev;
583    u16 *SchedTbl, NullVci = 0;
584    u32 i, NumFound;
585 
586    iadev = INPH_IA_DEV(vcc->dev);
587    iadev->NumEnabledCBR--;
588    SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize);
589    if (iadev->NumEnabledCBR == 0) {
590       writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
591       IF_CBR (printk("CBR support disabled\n");)
592    }
593    NumFound = 0;
594    for (i=0; i < iadev->CbrTotEntries; i++)
595    {
596       if (*SchedTbl == vcc->vci) {
597          iadev->CbrRemEntries++;
598          *SchedTbl = NullVci;
599          IF_CBR(NumFound++;)
600       }
601       SchedTbl++;
602    }
603    IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);)
604 }
605 
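/*
 * ia_avail_descs: number of free transmit descriptors, i.e. the number of
 * 16-bit TCQ entries (hence the divide by 2) between our read pointer and
 * the hardware write pointer, allowing for queue wrap-around.
 */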
606 static int ia_avail_descs(IADEV *iadev) {
607    int tmp = 0;
608    ia_hack_tcq(iadev);
609    if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd)
610       tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2;
611    else
612       tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr -
613                    iadev->ffL.tcq_st) / 2;
614    return tmp;
615 }
616 
617 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb);
618 
619 static int ia_que_tx (IADEV *iadev) {
620    struct sk_buff *skb;
621    int num_desc;
622    struct atm_vcc *vcc;
623    struct ia_vcc *iavcc;
624    num_desc = ia_avail_descs(iadev);
625 
626    while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) {
627       if (!(vcc = ATM_SKB(skb)->vcc)) {
628          dev_kfree_skb_any(skb);
629          printk("ia_que_tx: Null vcc\n");
630          break;
631       }
632       if (!test_bit(ATM_VF_READY,&vcc->flags)) {
633          dev_kfree_skb_any(skb);
634          printk("Free the SKB on closed vci %d \n", vcc->vci);
635          break;
636       }
637       iavcc = INPH_IA_VCC(vcc);
638       if (ia_pkt_tx (vcc, skb)) {
639          skb_queue_head(&iadev->tx_backlog, skb);
640       }
641       num_desc--;
642    }
643    return 0;
644 }
645 
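/*
 * ia_tx_poll: drain tx_return_q (filled by ia_hack_tcq() for rate-limited
 * VCs).  For each returned entry, pop skbs off the VC's txing_skb list until
 * the matching one is found, hand completed skbs back through vcc->pop() (or
 * free them), then push any backlogged transmits via ia_que_tx().
 */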
646 static void ia_tx_poll (IADEV *iadev) {
647    struct atm_vcc *vcc = NULL;
648    struct sk_buff *skb = NULL, *skb1 = NULL;
649    struct ia_vcc *iavcc;
650    IARTN_Q *  rtne;
651 
652    ia_hack_tcq(iadev);
653    while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) {
654        skb = rtne->data.txskb;
655        if (!skb) {
656            printk("ia_tx_poll: skb is null\n");
657            goto out;
658        }
659        vcc = ATM_SKB(skb)->vcc;
660        if (!vcc) {
661            printk("ia_tx_poll: vcc is null\n");
662            dev_kfree_skb_any(skb);
663 	   goto out;
664        }
665 
666        iavcc = INPH_IA_VCC(vcc);
667        if (!iavcc) {
668            printk("ia_tx_poll: iavcc is null\n");
669            dev_kfree_skb_any(skb);
670 	   goto out;
671        }
672 
673        skb1 = skb_dequeue(&iavcc->txing_skb);
674        while (skb1 && (skb1 != skb)) {
675           if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) {
676              printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci);
677           }
678           IF_ERR(printk("Releasing an skb that does not match\n");)
679           if ((vcc->pop) && (skb1->len != 0))
680           {
681              vcc->pop(vcc, skb1);
682              IF_EVENT(printk("Transmit Done - skb 0x%lx return\n",
683                                                           (long)skb1);)
684           }
685           else
686              dev_kfree_skb_any(skb1);
687           skb1 = skb_dequeue(&iavcc->txing_skb);
688        }
689        if (!skb1) {
690           IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);)
691           ia_enque_head_rtn_q (&iadev->tx_return_q, rtne);
692           break;
693        }
694        if ((vcc->pop) && (skb->len != 0))
695        {
696           vcc->pop(vcc, skb);
697           IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);)
698        }
699        else
700           dev_kfree_skb_any(skb);
701        kfree(rtne);
702     }
703     ia_que_tx(iadev);
704 out:
705     return;
706 }
707 #if 0
708 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val)
709 {
710         u32	t;
711 	int	i;
712 	/*
713 	 * Issue a command to enable writes to the NOVRAM
714 	 */
715 	NVRAM_CMD (EXTEND + EWEN);
716 	NVRAM_CLR_CE;
717 	/*
718 	 * issue the write command
719 	 */
720 	NVRAM_CMD(IAWRITE + addr);
721 	/*
722 	 * Send the data, starting with D15, then D14, and so on for 16 bits
723 	 */
724 	for (i=15; i>=0; i--) {
725 		NVRAM_CLKOUT (val & 0x8000);
726 		val <<= 1;
727 	}
728 	NVRAM_CLR_CE;
729 	CFG_OR(NVCE);
730 	t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
731 	while (!(t & NVDO))
732 		t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS);
733 
734 	NVRAM_CLR_CE;
735 	/*
736 	 * disable writes again
737 	 */
738 	NVRAM_CMD(EXTEND + EWDS);
739 	NVRAM_CLR_CE;
740 	CFG_AND(~NVDI);
741 }
742 #endif
743 
744 static u16 ia_eeprom_get (IADEV *iadev, u32 addr)
745 {
746 	u_short	val;
747         u32	t;
748 	int	i;
749 	/*
750 	 * Read the first bit that was clocked with the falling edge of
751 	 * the last command data clock
752 	 */
753 	NVRAM_CMD(IAREAD + addr);
754 	/*
755 	 * Now read the rest of the bits, the next bit read is D14, then D13,
756 	 * and so on.
757 	 */
758 	val = 0;
759 	for (i=15; i>=0; i--) {
760 		NVRAM_CLKIN(t);
761 		val |= (t << i);
762 	}
763 	NVRAM_CLR_CE;
764 	CFG_AND(~NVDI);
765 	return val;
766 }
767 
768 static void ia_hw_type(IADEV *iadev) {
769    u_short memType = ia_eeprom_get(iadev, 25);
770    iadev->memType = memType;
771    if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) {
772       iadev->num_tx_desc = IA_TX_BUF;
773       iadev->tx_buf_sz = IA_TX_BUF_SZ;
774       iadev->num_rx_desc = IA_RX_BUF;
775       iadev->rx_buf_sz = IA_RX_BUF_SZ;
776    } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) {
777       if (IA_TX_BUF == DFL_TX_BUFFERS)
778         iadev->num_tx_desc = IA_TX_BUF / 2;
779       else
780         iadev->num_tx_desc = IA_TX_BUF;
781       iadev->tx_buf_sz = IA_TX_BUF_SZ;
782       if (IA_RX_BUF == DFL_RX_BUFFERS)
783         iadev->num_rx_desc = IA_RX_BUF / 2;
784       else
785         iadev->num_rx_desc = IA_RX_BUF;
786       iadev->rx_buf_sz = IA_RX_BUF_SZ;
787    }
788    else {
789       if (IA_TX_BUF == DFL_TX_BUFFERS)
790         iadev->num_tx_desc = IA_TX_BUF / 8;
791       else
792         iadev->num_tx_desc = IA_TX_BUF;
793       iadev->tx_buf_sz = IA_TX_BUF_SZ;
794       if (IA_RX_BUF == DFL_RX_BUFFERS)
795         iadev->num_rx_desc = IA_RX_BUF / 8;
796       else
797         iadev->num_rx_desc = IA_RX_BUF;
798       iadev->rx_buf_sz = IA_RX_BUF_SZ;
799    }
800    iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz);
801    IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n",
802          iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc,
803          iadev->rx_buf_sz, iadev->rx_pkt_ram);)
804 
805 #if 0
806    if ((memType & FE_MASK) == FE_SINGLE_MODE)
807       iadev->phy_type = PHY_OC3C_S;
808    else if ((memType & FE_MASK) == FE_UTP_OPTION)
809       iadev->phy_type = PHY_UTP155;
810    else
811       iadev->phy_type = PHY_OC3C_M;
812 #endif
813 
814    iadev->phy_type = memType & FE_MASK;
815    IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n",
816                                          memType,iadev->phy_type);)
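   /* LineRate is the PHY's cell rate in cells/sec: the bit rate divided by
    * 8 for bytes and by the 53-byte cell size, derated by 26/27 (presumably
    * to allow for framing overhead). */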
817    if (iadev->phy_type == FE_25MBIT_PHY)
818       iadev->LineRate = (u32)(((25600000/8)*26)/(27*53));
819    else if (iadev->phy_type == FE_DS3_PHY)
820       iadev->LineRate = (u32)(((44736000/8)*26)/(27*53));
821    else if (iadev->phy_type == FE_E3_PHY)
822       iadev->LineRate = (u32)(((34368000/8)*26)/(27*53));
823    else
824        iadev->LineRate = (u32)(ATM_OC3_PCR);
825    IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);)
826 
827 }
828 
829 static void IaFrontEndIntr(IADEV *iadev) {
830   volatile IA_SUNI *suni;
831   volatile ia_mb25_t *mb25;
832   volatile suni_pm7345_t *suni_pm7345;
833   u32 intr_status;
834   u_int frmr_intr;
835 
836   if(iadev->phy_type & FE_25MBIT_PHY) {
837      mb25 = (ia_mb25_t*)iadev->phy;
838      iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
839   } else if (iadev->phy_type & FE_DS3_PHY) {
840      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
841      /* clear FRMR interrupts */
842      frmr_intr   = suni_pm7345->suni_ds3_frm_intr_stat;
843      iadev->carrier_detect =
844            Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
845   } else if (iadev->phy_type & FE_E3_PHY ) {
846      suni_pm7345 = (suni_pm7345_t *)iadev->phy;
847      frmr_intr   = suni_pm7345->suni_e3_frm_maint_intr_ind;
848      iadev->carrier_detect =
849            Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS));
850   }
851   else {
852      suni = (IA_SUNI *)iadev->phy;
853      intr_status = suni->suni_rsop_status & 0xff;
854      iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV));
855   }
856   if (iadev->carrier_detect)
857     printk("IA: SUNI carrier detected\n");
858   else
859     printk("IA: SUNI carrier signal lost\n");
860   return;
861 }
862 
863 static void ia_mb25_init (IADEV *iadev)
864 {
865    volatile ia_mb25_t  *mb25 = (ia_mb25_t*)iadev->phy;
866 #if 0
867    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED;
868 #endif
869    mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC;
870    mb25->mb25_diag_control = 0;
871    /*
872     * Initialize carrier detect state
873     */
874    iadev->carrier_detect =  Boolean(mb25->mb25_intr_status & MB25_IS_GSB);
875    return;
876 }
877 
878 static void ia_suni_pm7345_init (IADEV *iadev)
879 {
880    volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy;
881    if (iadev->phy_type & FE_DS3_PHY)
882    {
883       iadev->carrier_detect =
884           Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV));
885       suni_pm7345->suni_ds3_frm_intr_enbl = 0x17;
886       suni_pm7345->suni_ds3_frm_cfg = 1;
887       suni_pm7345->suni_ds3_tran_cfg = 1;
888       suni_pm7345->suni_config = 0;
889       suni_pm7345->suni_splr_cfg = 0;
890       suni_pm7345->suni_splt_cfg = 0;
891    }
892    else
893    {
894       iadev->carrier_detect =
895           Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS));
896       suni_pm7345->suni_e3_frm_fram_options = 0x4;
897       suni_pm7345->suni_e3_frm_maint_options = 0x20;
898       suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d;
899       suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30;
900       suni_pm7345->suni_e3_tran_stat_diag_options = 0x0;
901       suni_pm7345->suni_e3_tran_fram_options = 0x1;
902       suni_pm7345->suni_config = SUNI_PM7345_E3ENBL;
903       suni_pm7345->suni_splr_cfg = 0x41;
904       suni_pm7345->suni_splt_cfg = 0x41;
905    }
906    /*
907     * Enable RSOP loss of signal interrupt.
908     */
909    suni_pm7345->suni_intr_enbl = 0x28;
910 
911    /*
912     * Clear error counters
913     */
914    suni_pm7345->suni_id_reset = 0;
915 
916    /*
917     * Clear "PMCTST" in master test register.
918     */
919    suni_pm7345->suni_master_test = 0;
920 
921    suni_pm7345->suni_rxcp_ctrl = 0x2c;
922    suni_pm7345->suni_rxcp_fctrl = 0x81;
923 
924    suni_pm7345->suni_rxcp_idle_pat_h1 =
925    	suni_pm7345->suni_rxcp_idle_pat_h2 =
926    	suni_pm7345->suni_rxcp_idle_pat_h3 = 0;
927    suni_pm7345->suni_rxcp_idle_pat_h4 = 1;
928 
929    suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff;
930    suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff;
931    suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff;
932    suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe;
933 
934    suni_pm7345->suni_rxcp_cell_pat_h1 =
935    	suni_pm7345->suni_rxcp_cell_pat_h2 =
936    	suni_pm7345->suni_rxcp_cell_pat_h3 = 0;
937    suni_pm7345->suni_rxcp_cell_pat_h4 = 1;
938 
939    suni_pm7345->suni_rxcp_cell_mask_h1 =
940    	suni_pm7345->suni_rxcp_cell_mask_h2 =
941    	suni_pm7345->suni_rxcp_cell_mask_h3 =
942    	suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff;
943 
944    suni_pm7345->suni_txcp_ctrl = 0xa4;
945    suni_pm7345->suni_txcp_intr_en_sts = 0x10;
946    suni_pm7345->suni_txcp_idle_pat_h5 = 0x55;
947 
948    suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB |
949                                  SUNI_PM7345_CLB |
950                                  SUNI_PM7345_DLB |
951                                   SUNI_PM7345_PLB);
952 #ifdef __SNMP__
953    suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE;
954 #endif /* __SNMP__ */
955    return;
956 }
957 
958 
959 /***************************** IA_LIB END *****************************/
960 
961 static int tcnter = 0;
962 static void xdump( u_char*  cp, int  length, char*  prefix )
963 {
964     int col, count;
965     u_char prntBuf[120];
966     u_char*  pBuf = prntBuf;
967     count = 0;
968     while(count < length){
969         pBuf += sprintf( pBuf, "%s", prefix );
970         for(col = 0;count + col < length && col < 16; col++){
971             if (col != 0 && (col % 4) == 0)
972                 pBuf += sprintf( pBuf, " " );
973             pBuf += sprintf( pBuf, "%02X ", cp[count + col] );
974         }
975         while(col++ < 16){      /* pad end of buffer with blanks */
976             if ((col % 4) == 0)
977                 pBuf += sprintf( pBuf, " " );
978             pBuf += sprintf( pBuf, "   " );
979         }
980         pBuf += sprintf( pBuf, "  " );
981         for(col = 0;count + col < length && col < 16; col++){
982             if (isprint((int)cp[count + col]))
983                 pBuf += sprintf( pBuf, "%c", cp[count + col] );
984             else
985                 pBuf += sprintf( pBuf, "." );
986                 }
987         sprintf( pBuf, "\n" );
988         // SPrint(prntBuf);
989         printk("%s", prntBuf);
990         count += col;
991         pBuf = prntBuf;
992     }
993 
994 }  /* close xdump(... */
995 
996 
997 static struct atm_dev *ia_boards = NULL;
998 
999 #define ACTUAL_RAM_BASE \
1000 	RAM_BASE*((iadev->mem)/(128 * 1024))
1001 #define ACTUAL_SEG_RAM_BASE \
1002 	IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1003 #define ACTUAL_REASS_RAM_BASE \
1004 	IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024))
1005 
1006 
1007 /*-- some utilities and memory allocation stuff will come here -------------*/
1008 
1009 static void desc_dbg(IADEV *iadev) {
1010 
1011   u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr;
1012   u32 i;
1013   void __iomem *tmp;
1014   // regval = readl((u32)ia_cmds->maddr);
1015   tcq_wr_ptr =  readw(iadev->seg_reg+TCQ_WR_PTR);
1016   printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n",
1017                      tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr),
1018                      readw(iadev->seg_ram+tcq_wr_ptr-2));
1019   printk(" host_tcq_wr = 0x%x  host_tcq_rd = 0x%x \n",  iadev->host_tcq_wr,
1020                    iadev->ffL.tcq_rd);
1021   tcq_st_ptr =  readw(iadev->seg_reg+TCQ_ST_ADR);
1022   tcq_ed_ptr =  readw(iadev->seg_reg+TCQ_ED_ADR);
1023   printk("tcq_st_ptr = 0x%x    tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr);
1024   i = 0;
1025   while (tcq_st_ptr != tcq_ed_ptr) {
1026       tmp = iadev->seg_ram+tcq_st_ptr;
1027       printk("TCQ slot %d desc = %d  Addr = %p\n", i++, readw(tmp), tmp);
1028       tcq_st_ptr += 2;
1029   }
1030   for(i=0; i <iadev->num_tx_desc; i++)
1031       printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp);
1032 }
1033 
1034 
1035 /*----------------------------- Receiving side stuff --------------------------*/
1036 
1037 static void rx_excp_rcvd(struct atm_dev *dev)
1038 {
1039 #if 0 /* closing the receiving side will cause too many excp interrupts */
1040   IADEV *iadev;
1041   u_short state;
1042   u_short excpq_rd_ptr;
1043   //u_short *ptr;
1044   int vci, error = 1;
1045   iadev = INPH_IA_DEV(dev);
1046   state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1047   while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY)
1048   { printk("state = %x \n", state);
1049         excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff;
1050  printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr);
1051         if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR))
1052             IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");)
1053         // TODO: update exception stat
1054 	vci = readw(iadev->reass_ram+excpq_rd_ptr);
1055 	error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007;
1056         // pwang_test
1057 	excpq_rd_ptr += 4;
1058 	if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff))
1059  	    excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff;
1060 	writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR);
1061         state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1062   }
1063 #endif
1064 }
1065 
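/*
 * free_desc: hand a receive descriptor back to the adapter by writing its
 * number into the free descriptor queue and advancing the (wrapping)
 * free-queue write pointer.
 */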
1066 static void free_desc(struct atm_dev *dev, int desc)
1067 {
1068 	IADEV *iadev;
1069 	iadev = INPH_IA_DEV(dev);
1070         writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr);
1071 	iadev->rfL.fdq_wr +=2;
1072 	if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed)
1073 		iadev->rfL.fdq_wr =  iadev->rfL.fdq_st;
1074 	writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR);
1075 }
1076 
1077 
1078 static int rx_pkt(struct atm_dev *dev)
1079 {
1080 	IADEV *iadev;
1081 	struct atm_vcc *vcc;
1082 	unsigned short status;
1083 	struct rx_buf_desc __iomem *buf_desc_ptr;
1084 	int desc;
1085 	struct dle* wr_ptr;
1086 	int len;
1087 	struct sk_buff *skb;
1088 	u_int buf_addr, dma_addr;
1089 
1090 	iadev = INPH_IA_DEV(dev);
1091 	if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff))
1092 	{
1093    	    printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number);
1094 	    return -EINVAL;
1095 	}
1096 	/* mask off the top 3 bits to get the actual desc no. */
1097 	desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff;
1098         IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n",
1099                                     iadev->reass_ram, iadev->rfL.pcq_rd, desc);
1100               printk(" pcq_wr_ptr = 0x%x\n",
1101                                readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);)
1102 	/* update the read pointer - maybe we should do this at the end */
1103 	if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed)
1104 		iadev->rfL.pcq_rd = iadev->rfL.pcq_st;
1105 	else
1106 		iadev->rfL.pcq_rd += 2;
1107 	writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR);
1108 
1109 	/* get the buffer desc entry.
1110 		update stuff. - doesn't seem to be any update necessary
1111 	*/
1112 	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1113 	/* make the ptr point to the corresponding buffer desc entry */
1114 	buf_desc_ptr += desc;
1115         if (!desc || (desc > iadev->num_rx_desc) ||
1116                       ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) {
1117             free_desc(dev, desc);
1118             IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);)
1119             return -1;
1120         }
1121 	vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff];
1122 	if (!vcc)
1123 	{
1124                 free_desc(dev, desc);
1125 		printk("IA: null vcc, drop PDU\n");
1126 		return -1;
1127 	}
1128 
1129 
1130 	/* might want to check the status bits for errors */
1131 	status = (u_short) (buf_desc_ptr->desc_mode);
1132 	if (status & (RX_CER | RX_PTE | RX_OFL))
1133 	{
1134                 atomic_inc(&vcc->stats->rx_err);
1135 		IF_ERR(printk("IA: bad packet, dropping it");)
1136                 if (status & RX_CER) {
1137                     IF_ERR(printk(" cause: packet CRC error\n");)
1138                 }
1139                 else if (status & RX_PTE) {
1140                     IF_ERR(printk(" cause: packet time out\n");)
1141                 }
1142                 else {
1143                     IF_ERR(printk(" cause: buffer overflow\n");)
1144                 }
1145 		goto out_free_desc;
1146 	}
1147 
1148 	/*
1149 		build DLE.
1150 	*/
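	/* Each DLE describes one DMA transfer: len bytes from the
	   adapter-local buffer at buf_addr into the host buffer mapped
	   for DMA below, with an interrupt requested on completion
	   (DMA_INT_ENABLE). */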
1151 
1152 	buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo;
1153 	dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo;
1154 	len = dma_addr - buf_addr;
1155         if (len > iadev->rx_buf_sz) {
1156            printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
1157            atomic_inc(&vcc->stats->rx_err);
1158 	   goto out_free_desc;
1159         }
1160 
1161         if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) {
1162            if (vcc->vci < 32)
1163               printk("Drop control packets\n");
1164            goto out_free_desc;
1165         }
1166 	skb_put(skb,len);
1167         // pwang_test
1168         ATM_SKB(skb)->vcc = vcc;
1169         ATM_DESC(skb) = desc;
1170 	skb_queue_tail(&iadev->rx_dma_q, skb);
1171 
1172 	/* Build the DLE structure */
1173 	wr_ptr = iadev->rx_dle_q.write;
1174 	wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
1175 		len, PCI_DMA_FROMDEVICE);
1176 	wr_ptr->local_pkt_addr = buf_addr;
1177 	wr_ptr->bytes = len;	/* We don't know this do we ?? */
1178 	wr_ptr->mode = DMA_INT_ENABLE;
1179 
1180 	/* should take care of wrap around here too. */
1181         if(++wr_ptr == iadev->rx_dle_q.end)
1182              wr_ptr = iadev->rx_dle_q.start;
1183 	iadev->rx_dle_q.write = wr_ptr;
1184 	udelay(1);
1185 	/* Increment transaction counter */
1186 	writel(1, iadev->dma+IPHASE5575_RX_COUNTER);
1187 out:	return 0;
1188 out_free_desc:
1189         free_desc(dev, desc);
1190         goto out;
1191 }
1192 
1193 static void rx_intr(struct atm_dev *dev)
1194 {
1195   IADEV *iadev;
1196   u_short status;
1197   u_short state, i;
1198 
1199   iadev = INPH_IA_DEV(dev);
1200   status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff;
1201   IF_EVENT(printk("rx_intr: status = 0x%x\n", status);)
1202   if (status & RX_PKT_RCVD)
1203   {
1204 	/* Received an interrupt for an incoming packet.
1205 	   A descriptor has been written to the packet complete
1206 	   queue. Get all the descriptors and set up DMA to move
1207 	   the packets until the packet complete queue is
1208 	   empty.
1209 	*/
1210 	state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1211         IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);)
1212 	while(!(state & PCQ_EMPTY))
1213 	{
1214              rx_pkt(dev);
1215 	     state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1216 	}
1217         iadev->rxing = 1;
1218   }
1219   if (status & RX_FREEQ_EMPT)
1220   {
1221      if (iadev->rxing) {
1222         iadev->rx_tmp_cnt = iadev->rx_pkt_cnt;
1223         iadev->rx_tmp_jif = jiffies;
1224         iadev->rxing = 0;
1225      }
1226      else if (((jiffies - iadev->rx_tmp_jif) > 50) &&
1227                ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) {
1228         for (i = 1; i <= iadev->num_rx_desc; i++)
1229                free_desc(dev, i);
1230 printk("Test logic RUN!!!!\n");
1231         writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG);
1232         iadev->rxing = 1;
1233      }
1234      IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);)
1235   }
1236 
1237   if (status & RX_EXCP_RCVD)
1238   {
1239 	/* probably need to handle the exception queue also. */
1240 	IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);)
1241 	rx_excp_rcvd(dev);
1242   }
1243 
1244 
1245   if (status & RX_RAW_RCVD)
1246   {
1247 	/* Need to handle the raw incoming cells. This depends on
1248 	whether we have been programmed to receive the raw cells or not;
1249 	otherwise ignore. */
1250 	IF_EVENT(printk("Rx intr status:  RX_RAW_RCVD %08x\n", status);)
1251   }
1252 }
1253 
1254 
1255 static void rx_dle_intr(struct atm_dev *dev)
1256 {
1257   IADEV *iadev;
1258   struct atm_vcc *vcc;
1259   struct sk_buff *skb;
1260   int desc;
1261   u_short state;
1262   struct dle *dle, *cur_dle;
1263   u_int dle_lp;
1264   int len;
1265   iadev = INPH_IA_DEV(dev);
1266 
1267   /* free all the DLEs done, that is, just update our own DLE read pointer
1268 	- do we really need to do this? Think not. */
1269   /* DMA is done, so just take all the receive buffers from the rx dma queue
1270 	and push them up to the higher layer protocol. Also free the desc
1271 	associated with the buffer. */
1272   dle = iadev->rx_dle_q.read;
1273   dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1);
1274   cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4));
1275   while(dle != cur_dle)
1276   {
1277       /* free the DMAed skb */
1278       skb = skb_dequeue(&iadev->rx_dma_q);
1279       if (!skb)
1280          goto INCR_DLE;
1281       desc = ATM_DESC(skb);
1282       free_desc(dev, desc);
1283 
1284       if (!(len = skb->len))
1285       {
1286           printk("rx_dle_intr: skb len 0\n");
1287 	  dev_kfree_skb_any(skb);
1288       }
1289       else
1290       {
1291           struct cpcs_trailer *trailer;
1292           u_short length;
1293           struct ia_vcc *ia_vcc;
1294 
1295 	  pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr,
1296 	  	len, PCI_DMA_FROMDEVICE);
1297           /* no VCC related housekeeping done as yet. lets see */
1298           vcc = ATM_SKB(skb)->vcc;
1299 	  if (!vcc) {
1300 	      printk("IA: null vcc\n");
1301               dev_kfree_skb_any(skb);
1302               goto INCR_DLE;
1303           }
1304           ia_vcc = INPH_IA_VCC(vcc);
1305           if (ia_vcc == NULL)
1306           {
1307              atomic_inc(&vcc->stats->rx_err);
1308              dev_kfree_skb_any(skb);
1309              atm_return(vcc, atm_guess_pdu2truesize(len));
1310              goto INCR_DLE;
1311            }
1312           // get real pkt length  pwang_test
1313           trailer = (struct cpcs_trailer*)((u_char *)skb->data +
1314                                  skb->len - sizeof(*trailer));
1315           length =  swap(trailer->length);
1316           if ((length > iadev->rx_buf_sz) || (length >
1317                               (skb->len - sizeof(struct cpcs_trailer))))
1318           {
1319              atomic_inc(&vcc->stats->rx_err);
1320              IF_ERR(printk("rx_dle_intr: Bad  AAL5 trailer %d (skb len %d)",
1321                                                             length, skb->len);)
1322              dev_kfree_skb_any(skb);
1323              atm_return(vcc, atm_guess_pdu2truesize(len));
1324              goto INCR_DLE;
1325           }
1326           skb_trim(skb, length);
1327 
1328 	  /* Display the packet */
1329 	  IF_RXPKT(printk("\nDMAed received data: len = %d \n", skb->len);
1330           xdump(skb->data, skb->len, "RX: ");
1331           printk("\n");)
1332 
1333 	  IF_RX(printk("rx_dle_intr: skb push");)
1334 	  vcc->push(vcc,skb);
1335 	  atomic_inc(&vcc->stats->rx);
1336           iadev->rx_pkt_cnt++;
1337       }
1338 INCR_DLE:
1339       if (++dle == iadev->rx_dle_q.end)
1340     	  dle = iadev->rx_dle_q.start;
1341   }
1342   iadev->rx_dle_q.read = dle;
1343 
1344   /* if the interrupts are masked because there were no free desc available,
1345 		unmask them now. */
1346   if (!iadev->rxing) {
1347      state = readl(iadev->reass_reg + STATE_REG) & 0xffff;
1348      if (!(state & FREEQ_EMPTY)) {
1349         state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff;
1350         writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD),
1351                                       iadev->reass_reg+REASS_MASK_REG);
1352         iadev->rxing++;
1353      }
1354   }
1355 }
1356 
1357 
1358 static int open_rx(struct atm_vcc *vcc)
1359 {
1360 	IADEV *iadev;
1361 	u_short __iomem *vc_table;
1362 	u_short __iomem *reass_ptr;
1363 	IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);)
1364 
1365 	if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0;
1366 	iadev = INPH_IA_DEV(vcc->dev);
1367         if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
1368            if (iadev->phy_type & FE_25MBIT_PHY) {
1369                printk("IA:  ABR not supported\n");
1370                return -EINVAL;
1371            }
1372         }
1373 	/* Make only this VCI in the vc table valid and let all
1374 		others be invalid entries */
1375 	vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize;
1376 	vc_table += vcc->vci;
1377 	/* mask the last 6 bits and OR it with 3 for 1K VCs */
1378 
1379         *vc_table = vcc->vci << 6;
1380 	/* Also keep a list of open rx vcs so that we can attach them with
1381 		incoming PDUs later. */
1382 	if ((vcc->qos.rxtp.traffic_class == ATM_ABR) ||
1383                                 (vcc->qos.txtp.traffic_class == ATM_ABR))
1384 	{
1385                 srv_cls_param_t srv_p;
1386                 init_abr_vc(iadev, &srv_p);
1387                 ia_open_abr_vc(iadev, &srv_p, vcc, 0);
1388 	}
1389        	else {  /* for UBR  later may need to add CBR logic */
1390         	reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize;
1391            	reass_ptr += vcc->vci;
1392            	*reass_ptr = NO_AAL5_PKT;
1393        	}
1394 
1395 	if (iadev->rx_open[vcc->vci])
1396 		printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n",
1397 			vcc->dev->number, vcc->vci);
1398 	iadev->rx_open[vcc->vci] = vcc;
1399 	return 0;
1400 }
1401 
1402 static int rx_init(struct atm_dev *dev)
1403 {
1404 	IADEV *iadev;
1405 	struct rx_buf_desc __iomem *buf_desc_ptr;
1406 	unsigned long rx_pkt_start = 0;
1407 	void *dle_addr;
1408 	struct abr_vc_table  *abr_vc_table;
1409 	u16 *vc_table;
1410 	u16 *reass_table;
1411         u16 *ptr16;
1412 	int i,j, vcsize_sel;
1413 	u_short freeq_st_adr;
1414 	u_short *freeq_start;
1415 
1416 	iadev = INPH_IA_DEV(dev);
1417   //    spin_lock_init(&iadev->rx_lock);
1418 
1419 	/* Allocate 4k bytes - more aligned than needed (4k boundary) */
1420 	dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1421 					&iadev->rx_dle_dma);
1422 	if (!dle_addr)  {
1423 		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1424 		goto err_out;
1425 	}
1426 	iadev->rx_dle_q.start = (struct dle*)dle_addr;
1427 	iadev->rx_dle_q.read = iadev->rx_dle_q.start;
1428 	iadev->rx_dle_q.write = iadev->rx_dle_q.start;
1429 	iadev->rx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1430 	/* the end of the dle q points to the entry after the last
1431 	DLE that can be used. */
1432 
1433 	/* write the upper 20 bits of the start address to rx list address register */
1434 	writel(iadev->rx_dle_dma & 0xfffff000,
1435 	       iadev->dma + IPHASE5575_RX_LIST_ADDR);
1436 	IF_INIT(printk("Tx Dle list addr: 0x%08x value: 0x%0x\n",
1437                       (u32)(iadev->dma+IPHASE5575_TX_LIST_ADDR),
1438                       *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR));
1439 	printk("Rx Dle list addr: 0x%08x value: 0x%0x\n",
1440                       (u32)(iadev->dma+IPHASE5575_RX_LIST_ADDR),
1441                       *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));)
1442 
1443 	writew(0xffff, iadev->reass_reg+REASS_MASK_REG);
1444 	writew(0, iadev->reass_reg+MODE_REG);
1445 	writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG);
1446 
1447 	/* Receive side control memory map
1448 	   -------------------------------
1449 
1450 		Buffer descr	0x0000 (736 - 23K)
1451 		VP Table	0x5c00 (256 - 512)
1452 		Except q	0x5e00 (128 - 512)
1453 		Free buffer q	0x6000 (1K - 2K)
1454 		Packet comp q	0x6800 (1K - 2K)
1455 		Reass Table	0x7000 (1K - 2K)
1456 		VC Table	0x7800 (1K - 2K)
1457 		ABR VC Table	0x8000 (1K - 32K)
1458 	*/
1459 
1460 	/* Base address for Buffer Descriptor Table */
1461 	writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE);
1462 	/* Set the buffer size register */
1463 	writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE);
1464 
1465 	/* Initialize each entry in the Buffer Descriptor Table */
1466         iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize;
1467 	buf_desc_ptr = iadev->RX_DESC_BASE_ADDR;
1468 	memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1469 	buf_desc_ptr++;
1470 	rx_pkt_start = iadev->rx_pkt_ram;
1471 	for(i=1; i<=iadev->num_rx_desc; i++)
1472 	{
1473 		memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1474 		buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16;
1475 		buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff;
1476 		buf_desc_ptr++;
1477 		rx_pkt_start += iadev->rx_buf_sz;
1478 	}
1479 	IF_INIT(printk("Rx Buffer desc ptr: 0x%0x\n", (u32)(buf_desc_ptr));)
1480         i = FREE_BUF_DESC_Q*iadev->memSize;
1481 	writew(i >> 16,  iadev->reass_reg+REASS_QUEUE_BASE);
1482         writew(i, iadev->reass_reg+FREEQ_ST_ADR);
1483         writew(i+iadev->num_rx_desc*sizeof(u_short),
1484                                          iadev->reass_reg+FREEQ_ED_ADR);
1485         writew(i, iadev->reass_reg+FREEQ_RD_PTR);
1486         writew(i+iadev->num_rx_desc*sizeof(u_short),
1487                                         iadev->reass_reg+FREEQ_WR_PTR);
1488 	/* Fill the FREEQ with all the free descriptors. */
1489 	freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR);
1490 	freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr);
1491 	for(i=1; i<=iadev->num_rx_desc; i++)
1492 	{
1493 		*freeq_start = (u_short)i;
1494 		freeq_start++;
1495 	}
1496 	IF_INIT(printk("freeq_start: 0x%0x\n", (u32)freeq_start);)
1497         /* Packet Complete Queue */
1498         i = (PKT_COMP_Q * iadev->memSize) & 0xffff;
1499         writew(i, iadev->reass_reg+PCQ_ST_ADR);
1500         writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR);
1501         writew(i, iadev->reass_reg+PCQ_RD_PTR);
1502         writew(i, iadev->reass_reg+PCQ_WR_PTR);
1503 
1504         /* Exception Queue */
1505         i = (EXCEPTION_Q * iadev->memSize) & 0xffff;
1506         writew(i, iadev->reass_reg+EXCP_Q_ST_ADR);
1507         writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q),
1508                                              iadev->reass_reg+EXCP_Q_ED_ADR);
1509         writew(i, iadev->reass_reg+EXCP_Q_RD_PTR);
1510         writew(i, iadev->reass_reg+EXCP_Q_WR_PTR);
1511 
1512     	/* Load local copy of FREEQ and PCQ ptrs */
1513         iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff;
1514        	iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ;
1515 	iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff;
1516 	iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff;
1517         iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff;
1518 	iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff;
1519 	iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff;
1520 	iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff;
1521 
1522         IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x",
1523               iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd,
1524               iadev->rfL.pcq_wr);)
1525 	/* just for check - no VP TBL */
1526 	/* VP Table */
1527 	/* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */
1528 	/* initialize VP Table for invalid VPIs
1529 		- I guess we can write all 1s or 0x000f in the entire memory
1530 		  space or something similar.
1531 	*/
1532 
1533 	/* This seems to work and looks right to me too !!! */
1534         i =  REASS_TABLE * iadev->memSize;
1535 	writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE);
1536  	/* initialize each Reassembly table entry to NO_AAL5_PKT (no packet in progress) */
1537 	reass_table = (u16 *)(iadev->reass_ram+i);
1538         j = REASS_TABLE_SZ * iadev->memSize;
1539 	for(i=0; i < j; i++)
1540 		*reass_table++ = NO_AAL5_PKT;
1541        i = 8*1024;
1542        vcsize_sel =  0;
1543        while (i != iadev->num_vc) {
1544           i /= 2;
1545           vcsize_sel++;
1546        }
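       /* vcsize_sel encodes the VC table size as a power-of-two divisor of 8K:
          e.g. num_vc = 4096 gives vcsize_sel = 1, num_vc = 1024 gives 3
          (8192 >> 3 == 1024). */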
1547        i = RX_VC_TABLE * iadev->memSize;
1548        writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE);
1549        vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
1550         j = RX_VC_TABLE_SZ * iadev->memSize;
1551 	for(i = 0; i < j; i++)
1552 	{
1553 		/* shift the reassembly pointer by 3 + lower 3 bits of
1554 		vc_lkup_base register (=3 for 1K VCs) and the last byte
1555 		is those low 3 bits.
1556 		Shall program this later.
1557 		*/
1558 		*vc_table = (i << 6) | 15;	/* for invalid VCI */
1559 		vc_table++;
1560 	}
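	/* Each entry above is (index << 6) with the low bits set to 15 to mark
	   the VCI invalid until the VC is actually opened; e.g. the entry for
	   VC 5 starts out as (5 << 6) | 15 = 0x14f. */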
1561         /* ABR VC table */
1562         i =  ABR_VC_TABLE * iadev->memSize;
1563         writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE);
1564 
1565         i = ABR_VC_TABLE * iadev->memSize;
1566 	abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i);
1567         j = REASS_TABLE_SZ * iadev->memSize;
1568         memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table));
1569     	for(i = 0; i < j; i++) {
1570 		abr_vc_table->rdf = 0x0003;
1571              	abr_vc_table->air = 0x5eb1;
1572 	       	abr_vc_table++;
1573         }
1574 
1575 	/* Initialize other registers */
1576 
1577 	/* VP Filter Register set for VC Reassembly only */
1578 	writew(0xff00, iadev->reass_reg+VP_FILTER);
1579         writew(0, iadev->reass_reg+XTRA_RM_OFFSET);
1580 	writew(0x1,  iadev->reass_reg+PROTOCOL_ID);
1581 
1582 	/* Packet Timeout Count  related Registers :
1583 	   Set packet timeout to occur in about 3 seconds
1584 	   Set Packet Aging Interval count register to overflow in about 4 us
1585  	*/
1586         writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT );
1587         ptr16 = (u16*)j;
1588         i = ((u32)ptr16 >> 6) & 0xff;
1589 	ptr16  += j - 1;
1590 	i |=(((u32)ptr16 << 2) & 0xff00);
1591         writew(i, iadev->reass_reg+TMOUT_RANGE);
1592         /* initialize the desc_tbl timestamps */
1593         for(i=0; i<iadev->num_tx_desc;i++)
1594             iadev->desc_tbl[i].timestamp = 0;
1595 
1596 	/* to clear the interrupt status register - read it */
1597 	readw(iadev->reass_reg+REASS_INTR_STATUS_REG);
1598 
1599 	/* Mask Register - clear it */
1600 	writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG);
1601 
1602 	skb_queue_head_init(&iadev->rx_dma_q);
1603 	iadev->rx_free_desc_qhead = NULL;
1604 
1605 	iadev->rx_open = kzalloc(iadev->num_vc * sizeof(struct atm_vcc *), GFP_KERNEL);
1606 	if (!iadev->rx_open) {
1607 		printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
1608 		dev->number);
1609 		goto err_free_dle;
1610 	}
1611 
1612         iadev->rxing = 1;
1613         iadev->rx_pkt_cnt = 0;
1614 	/* Mode Register */
1615 	writew(R_ONLINE, iadev->reass_reg+MODE_REG);
1616 	return 0;
1617 
1618 err_free_dle:
1619 	pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
1620 			    iadev->rx_dle_dma);
1621 err_out:
1622 	return -ENOMEM;
1623 }
1624 
1625 
1626 /*
1627 	The memory map suggested in appendix A and the coding for it.
1628 	Keeping it around just in case we change our mind later.
1629 
1630 		Buffer descr	0x0000 (128 - 4K)
1631 		UBR sched	0x1000 (1K - 4K)
1632 		UBR Wait q	0x2000 (1K - 4K)
1633 		Commn queues	0x3000 Packet Ready, Transmit comp(0x3100)
1634 					(128 - 256) each
1635 		extended VC	0x4000 (1K - 8K)
1636 		ABR sched	0x6000	and ABR wait queue (1K - 2K) each
1637 		CBR sched	0x7000 (as needed)
1638 		VC table	0x8000 (1K - 32K)
1639 */
1640 
1641 static void tx_intr(struct atm_dev *dev)
1642 {
1643 	IADEV *iadev;
1644 	unsigned short status;
1645         unsigned long flags;
1646 
1647 	iadev = INPH_IA_DEV(dev);
1648 
1649 	status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG);
1650         if (status & TRANSMIT_DONE){
1651 
1652            IF_EVENT(printk("Transmit Done Intr logic run\n");)
1653            spin_lock_irqsave(&iadev->tx_lock, flags);
1654            ia_tx_poll(iadev);
1655            spin_unlock_irqrestore(&iadev->tx_lock, flags);
1656            writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
1657            if (iadev->close_pending)
1658                wake_up(&iadev->close_wait);
1659         }
1660 	if (status & TCQ_NOT_EMPTY)
1661 	{
1662 	    IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");)
1663 	}
1664 }
1665 
1666 static void tx_dle_intr(struct atm_dev *dev)
1667 {
1668         IADEV *iadev;
1669         struct dle *dle, *cur_dle;
1670         struct sk_buff *skb;
1671         struct atm_vcc *vcc;
1672         struct ia_vcc  *iavcc;
1673         u_int dle_lp;
1674         unsigned long flags;
1675 
1676         iadev = INPH_IA_DEV(dev);
1677         spin_lock_irqsave(&iadev->tx_lock, flags);
1678         dle = iadev->tx_dle_q.read;
1679         dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) &
1680                                         (sizeof(struct dle)*DLE_ENTRIES - 1);
1681         cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4));
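        /* dle_lp is the byte offset of the DLE the hardware is currently on;
           the >> 4 converts that byte offset into a DLE index (each DLE is
           16 bytes), so the loop below reclaims every entry between our
           read pointer and the hardware's position. */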
1682         while (dle != cur_dle)
1683         {
1684             /* free the DMAed skb */
1685             skb = skb_dequeue(&iadev->tx_dma_q);
1686             if (!skb) break;
1687 
1688 	    /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */
1689 	    if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) {
1690 		pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len,
1691 				 PCI_DMA_TODEVICE);
1692 	    }
1693             vcc = ATM_SKB(skb)->vcc;
1694             if (!vcc) {
1695                   printk("tx_dle_intr: vcc is null\n");
1696 		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
1697                   dev_kfree_skb_any(skb);
1698 
1699                   return;
1700             }
1701             iavcc = INPH_IA_VCC(vcc);
1702             if (!iavcc) {
1703                   printk("tx_dle_intr: iavcc is null\n");
1704 		  spin_unlock_irqrestore(&iadev->tx_lock, flags);
1705                   dev_kfree_skb_any(skb);
1706                   return;
1707             }
1708             if (vcc->qos.txtp.pcr >= iadev->rate_limit) {
1709                if ((vcc->pop) && (skb->len != 0))
1710                {
1711                  vcc->pop(vcc, skb);
1712                }
1713                else {
1714                  dev_kfree_skb_any(skb);
1715                }
1716             }
1717             else { /* Hold the rate-limited skb for flow control */
1718                IA_SKB_STATE(skb) |= IA_DLED;
1719                skb_queue_tail(&iavcc->txing_skb, skb);
1720             }
1721             IF_EVENT(printk("tx_dle_intr: enqueue skb = 0x%x \n", (u32)skb);)
1722             if (++dle == iadev->tx_dle_q.end)
1723                  dle = iadev->tx_dle_q.start;
1724         }
1725         iadev->tx_dle_q.read = dle;
1726         spin_unlock_irqrestore(&iadev->tx_lock, flags);
1727 }
1728 
1729 static int open_tx(struct atm_vcc *vcc)
1730 {
1731 	struct ia_vcc *ia_vcc;
1732 	IADEV *iadev;
1733 	struct main_vc *vc;
1734 	struct ext_vc *evc;
1735         int ret;
1736 	IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);)
1737 	if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0;
1738 	iadev = INPH_IA_DEV(vcc->dev);
1739 
1740         if (iadev->phy_type & FE_25MBIT_PHY) {
1741            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
1742                printk("IA:  ABR not supported\n");
1743                return -EINVAL;
1744            }
1745 	  if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1746                printk("IA:  CBR not supported\n");
1747                return -EINVAL;
1748           }
1749         }
1750         ia_vcc =  INPH_IA_VCC(vcc);
1751         memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc));
1752         if (vcc->qos.txtp.max_sdu >
1753                          (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){
1754            printk("IA:  SDU size %d exceeds the configured buffer size %d\n",
1755 		  vcc->qos.txtp.max_sdu, iadev->tx_buf_sz);
1756 	   vcc->dev_data = NULL;
1757            kfree(ia_vcc);
1758            return -EINVAL;
1759         }
1760 	ia_vcc->vc_desc_cnt = 0;
1761         ia_vcc->txing = 1;
1762 
1763         /* find pcr */
1764         if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR)
1765            vcc->qos.txtp.pcr = iadev->LineRate;
1766         else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0))
1767            vcc->qos.txtp.pcr = iadev->LineRate;
1768         else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr> 0))
1769            vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr;
1770         if (vcc->qos.txtp.pcr > iadev->LineRate)
1771              vcc->qos.txtp.pcr = iadev->LineRate;
1772         ia_vcc->pcr = vcc->qos.txtp.pcr;
1773 
1774         if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10;
1775         else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ;
1776         else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ;
1777         else ia_vcc->ltimeout = 2700 * HZ  / ia_vcc->pcr;
1778         if (ia_vcc->pcr < iadev->rate_limit)
1779            skb_queue_head_init (&ia_vcc->txing_skb);
1780         if (ia_vcc->pcr < iadev->rate_limit) {
1781 	   struct sock *sk = sk_atm(vcc);
1782 
1783 	   if (vcc->qos.txtp.max_sdu != 0) {
1784                if (ia_vcc->pcr > 60000)
1785                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5;
1786                else if (ia_vcc->pcr > 2000)
1787                   sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4;
1788                else
1789                  sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3;
1790            }
1791            else
1792              sk->sk_sndbuf = 24576;
1793         }
1794 
1795 	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
1796 	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
1797 	vc += vcc->vci;
1798 	evc += vcc->vci;
1799 	memset((caddr_t)vc, 0, sizeof(*vc));
1800 	memset((caddr_t)evc, 0, sizeof(*evc));
1801 
1802 	/* store the most significant 4 bits of vci as the last 4 bits
1803 		of first part of atm header.
1804 	   store the last 12 bits of vci as first 12 bits of the second
1805 		part of the atm header.
1806 	*/
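	/* e.g. for vci = 0x1234: atm_hdr1 = 0x1, atm_hdr2 = 0x2340 */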
1807 	evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f;
1808 	evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4;
1809 
1810 	/* check the following for different traffic classes */
1811 	if (vcc->qos.txtp.traffic_class == ATM_UBR)
1812 	{
1813 		vc->type = UBR;
1814                 vc->status = CRC_APPEND;
1815 		vc->acr = cellrate_to_float(iadev->LineRate);
1816                 if (vcc->qos.txtp.pcr > 0)
1817                    vc->acr = cellrate_to_float(vcc->qos.txtp.pcr);
1818                 IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n",
1819                                              vcc->qos.txtp.max_pcr,vc->acr);)
1820 	}
1821 	else if (vcc->qos.txtp.traffic_class == ATM_ABR)
1822 	{       srv_cls_param_t srv_p;
1823 		IF_ABR(printk("Tx ABR VCC\n");)
1824                 init_abr_vc(iadev, &srv_p);
1825                 if (vcc->qos.txtp.pcr > 0)
1826                    srv_p.pcr = vcc->qos.txtp.pcr;
1827                 if (vcc->qos.txtp.min_pcr > 0) {
1828                    int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr;
1829                    if (tmpsum > iadev->LineRate)
1830                        return -EBUSY;
1831                    srv_p.mcr = vcc->qos.txtp.min_pcr;
1832                    iadev->sum_mcr += vcc->qos.txtp.min_pcr;
1833                 }
1834                 else srv_p.mcr = 0;
1835                 if (vcc->qos.txtp.icr)
1836                    srv_p.icr = vcc->qos.txtp.icr;
1837                 if (vcc->qos.txtp.tbe)
1838                    srv_p.tbe = vcc->qos.txtp.tbe;
1839                 if (vcc->qos.txtp.frtt)
1840                    srv_p.frtt = vcc->qos.txtp.frtt;
1841                 if (vcc->qos.txtp.rif)
1842                    srv_p.rif = vcc->qos.txtp.rif;
1843                 if (vcc->qos.txtp.rdf)
1844                    srv_p.rdf = vcc->qos.txtp.rdf;
1845                 if (vcc->qos.txtp.nrm_pres)
1846                    srv_p.nrm = vcc->qos.txtp.nrm;
1847                 if (vcc->qos.txtp.trm_pres)
1848                    srv_p.trm = vcc->qos.txtp.trm;
1849                 if (vcc->qos.txtp.adtf_pres)
1850                    srv_p.adtf = vcc->qos.txtp.adtf;
1851                 if (vcc->qos.txtp.cdf_pres)
1852                    srv_p.cdf = vcc->qos.txtp.cdf;
1853                 if (srv_p.icr > srv_p.pcr)
1854                    srv_p.icr = srv_p.pcr;
1855                 IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d  mcr = %d\n",
1856                                                       srv_p.pcr, srv_p.mcr);)
1857 		ia_open_abr_vc(iadev, &srv_p, vcc, 1);
1858 	} else if (vcc->qos.txtp.traffic_class == ATM_CBR) {
1859                 if (iadev->phy_type & FE_25MBIT_PHY) {
1860                     printk("IA:  CBR not supported\n");
1861                     return -EINVAL;
1862                 }
1863                 if (vcc->qos.txtp.max_pcr > iadev->LineRate) {
1864                    IF_CBR(printk("PCR is not available\n");)
1865                    return -1;
1866                 }
1867                 vc->type = CBR;
1868                 vc->status = CRC_APPEND;
1869                 if ((ret = ia_cbr_setup (iadev, vcc)) < 0) {
1870                     return ret;
1871                 }
1872        }
1873 	else
1874            printk("iadev:  Non UBR, ABR and CBR traffic not supported\n");
1875 
1876         iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE;
1877 	IF_EVENT(printk("ia open_tx returning \n");)
1878 	return 0;
1879 }
1880 
1881 
1882 static int tx_init(struct atm_dev *dev)
1883 {
1884 	IADEV *iadev;
1885 	struct tx_buf_desc *buf_desc_ptr;
1886 	unsigned int tx_pkt_start;
1887 	void *dle_addr;
1888 	int i;
1889 	u_short tcq_st_adr;
1890 	u_short *tcq_start;
1891 	u_short prq_st_adr;
1892 	u_short *prq_start;
1893 	struct main_vc *vc;
1894 	struct ext_vc *evc;
1895         u_short tmp16;
1896         u32 vcsize_sel;
1897 
1898 	iadev = INPH_IA_DEV(dev);
1899         spin_lock_init(&iadev->tx_lock);
1900 
1901 	IF_INIT(printk("Tx MASK REG: 0x%0x\n",
1902                                 readw(iadev->seg_reg+SEG_MASK_REG));)
1903 
1904 	/* Allocate 4k (boundary aligned) bytes */
1905 	dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE,
1906 					&iadev->tx_dle_dma);
1907 	if (!dle_addr)  {
1908 		printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n");
1909 		goto err_out;
1910 	}
1911 	iadev->tx_dle_q.start = (struct dle*)dle_addr;
1912 	iadev->tx_dle_q.read = iadev->tx_dle_q.start;
1913 	iadev->tx_dle_q.write = iadev->tx_dle_q.start;
1914 	iadev->tx_dle_q.end = (struct dle*)((u32)dle_addr+sizeof(struct dle)*DLE_ENTRIES);
1915 
1916 	/* write the upper 20 bits of the start address to tx list address register */
1917 	writel(iadev->tx_dle_dma & 0xfffff000,
1918 	       iadev->dma + IPHASE5575_TX_LIST_ADDR);
1919 	writew(0xffff, iadev->seg_reg+SEG_MASK_REG);
1920 	writew(0, iadev->seg_reg+MODE_REG_0);
1921 	writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG);
1922         iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize;
1923         iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize;
1924         iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize;
1925 
1926 	/*
1927 	   Transmit side control memory map
1928 	   --------------------------------
1929 	 Buffer descr 	0x0000 (128 - 4K)
1930 	 Commn queues	0x1000	Transmit comp, Packet ready(0x1400)
1931 					(512 - 1K) each
1932 					TCQ - 4K, PRQ - 5K
1933 	 CBR Table 	0x1800 (as needed) - 6K
1934 	 UBR Table	0x3000 (1K - 4K) - 12K
1935 	 UBR Wait queue	0x4000 (1K - 4K) - 16K
1936 	 ABR sched	0x5000	and ABR wait queue (1K - 2K) each
1937 				ABR Tbl - 20K, ABR Wq - 22K
1938 	 extended VC	0x6000 (1K - 8K) - 24K
1939 	 VC Table	0x8000 (1K - 32K) - 32K
1940 
1941 	Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl
1942 	and Wait q, which can be allotted later.
1943 	*/
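	/* As on the receive side, these offsets are scaled by iadev->memSize
	   in the code below for the larger (4K-VC) boards. */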
1944 
1945 	/* Buffer Descriptor Table Base address */
1946 	writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE);
1947 
1948 	/* initialize each entry in the buffer descriptor table */
1949 	buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE);
1950 	memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1951 	buf_desc_ptr++;
1952 	tx_pkt_start = TX_PACKET_RAM;
1953 	for(i=1; i<=iadev->num_tx_desc; i++)
1954 	{
1955 		memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr));
1956 		buf_desc_ptr->desc_mode = AAL5;
1957 		buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16;
1958 		buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff;
1959 		buf_desc_ptr++;
1960 		tx_pkt_start += iadev->tx_buf_sz;
1961 	}
1962         iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL);
1963         if (!iadev->tx_buf) {
1964             printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1965 	    goto err_free_dle;
1966         }
1967        	for (i= 0; i< iadev->num_tx_desc; i++)
1968        	{
1969 	    struct cpcs_trailer *cpcs;
1970 
1971        	    cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA);
1972             if(!cpcs) {
1973 		printk(KERN_ERR DEV_LABEL " couldn't get freepage\n");
1974 		goto err_free_tx_bufs;
1975             }
1976 	    iadev->tx_buf[i].cpcs = cpcs;
1977 	    iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci,
1978 		cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE);
1979         }
1980         iadev->desc_tbl = kmalloc(iadev->num_tx_desc *
1981                                    sizeof(struct desc_tbl_t), GFP_KERNEL);
1982 	if (!iadev->desc_tbl) {
1983 		printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
1984 		goto err_free_all_tx_bufs;
1985 	}
1986 
1987 	/* Communication Queues base address */
1988         i = TX_COMP_Q * iadev->memSize;
1989 	writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE);
1990 
1991 	/* Transmit Complete Queue */
1992 	writew(i, iadev->seg_reg+TCQ_ST_ADR);
1993 	writew(i, iadev->seg_reg+TCQ_RD_PTR);
1994 	writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR);
1995 	iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short);
1996         writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
1997                                               iadev->seg_reg+TCQ_ED_ADR);
1998 	/* Fill the TCQ with all the free descriptors. */
1999 	tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR);
2000 	tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr);
2001 	for(i=1; i<=iadev->num_tx_desc; i++)
2002 	{
2003 		*tcq_start = (u_short)i;
2004 		tcq_start++;
2005 	}
2006 
2007 	/* Packet Ready Queue */
2008         i = PKT_RDY_Q * iadev->memSize;
2009 	writew(i, iadev->seg_reg+PRQ_ST_ADR);
2010 	writew(i+2 * iadev->num_tx_desc * sizeof(u_short),
2011                                               iadev->seg_reg+PRQ_ED_ADR);
2012 	writew(i, iadev->seg_reg+PRQ_RD_PTR);
2013 	writew(i, iadev->seg_reg+PRQ_WR_PTR);
2014 
2015         /* Load local copy of PRQ and TCQ ptrs */
2016         iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff;
2017 	iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff;
2018  	iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff;
2019 
2020 	iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff;
2021 	iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff;
2022 	iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff;
2023 
2024 	/* Just for safety, zero-initialize every PRQ entry; the PRQ starts */
2025 	/* out empty and has descriptors queued to it at transmit time.    */
2026 	prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR);
2027 	prq_start = (u_short *)(iadev->seg_ram+prq_st_adr);
2028 	for(i=1; i<=iadev->num_tx_desc; i++)
2029 	{
2030 		*prq_start = (u_short)0;	/* clear the entry */
2031 		prq_start++;
2032 	}
2033 	/* CBR Table */
2034         IF_INIT(printk("Start CBR Init\n");)
2035 #if 1  /* for 1K VC board, CBR_PTR_BASE is 0 */
2036         writew(0,iadev->seg_reg+CBR_PTR_BASE);
2037 #else /* Charlie's logic is wrong ? */
2038         tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17;
2039         IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);)
2040         writew(tmp16,iadev->seg_reg+CBR_PTR_BASE);
2041 #endif
2042 
2043         IF_INIT(printk("value in register = 0x%x\n",
2044                                    readw(iadev->seg_reg+CBR_PTR_BASE));)
2045         tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1;
2046         writew(tmp16, iadev->seg_reg+CBR_TAB_BEG);
2047         IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16,
2048                                         readw(iadev->seg_reg+CBR_TAB_BEG));)
2049         writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); /* CBR_PTR */
2050         tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1;
2051         writew(tmp16, iadev->seg_reg+CBR_TAB_END);
2052         IF_INIT(printk("iadev->seg_reg = 0x%x CBR_PTR_BASE = 0x%x\n",
2053                (u32)iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));)
2054         IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n",
2055           readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END),
2056           readw(iadev->seg_reg+CBR_TAB_END+1));)
2057 
2058         /* Initialize the CBR Scheduling Table */
2059         memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize,
2060                                                           0, iadev->num_vc*6);
2061         iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3;
2062         iadev->CbrEntryPt = 0;
2063         iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries;
2064         iadev->NumEnabledCBR = 0;
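        /* The CBR schedule holds num_vc*3 two-byte slots (num_vc*6 bytes,
           which is what the memset above clears); Granularity is the cell
           rate represented by a single slot (MAX_ATM_155 / slot count). */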
2065 
2066 	/* UBR scheduling Table and wait queue */
2067 	/* initialize all bytes of UBR scheduler table and wait queue to 0
2068 		- SCHEDSZ is 1K (# of entries).
2069 		- UBR Table size is 4K
2070 		- UBR wait queue is 4K
2071 	   since the table and wait queues are contiguous, all the bytes
2072 	   can be initialized by one memset.
2073 	*/
2074 
2075         vcsize_sel = 0;
2076         i = 8*1024;
2077         while (i != iadev->num_vc) {
2078           i /= 2;
2079           vcsize_sel++;
2080         }
2081 
2082         i = MAIN_VC_TABLE * iadev->memSize;
2083         writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE);
2084         i =  EXT_VC_TABLE * iadev->memSize;
2085         writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE);
2086         i = UBR_SCHED_TABLE * iadev->memSize;
2087         writew((i & 0xffff) >> 11,  iadev->seg_reg+UBR_SBPTR_BASE);
2088         i = UBR_WAIT_Q * iadev->memSize;
2089         writew((i >> 7) & 0xffff,  iadev->seg_reg+UBRWQ_BASE);
2090  	memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize),
2091                                                        0, iadev->num_vc*8);
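	/* For a 1K-VC board that is 1024*8 = 8K bytes - the 4K UBR table plus
	   the 4K UBR wait queue noted above. */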
2092 	/* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/
2093 	/* initialize all bytes of ABR scheduler table and wait queue to 0
2094 		- SCHEDSZ is 1K (# of entries).
2095 		- ABR Table size is 2K
2096 		- ABR wait queue is 2K
2097 	   since the table and wait queues are contiguous, all the bytes
2098 	   can be initialized by one memset.
2099 	*/
2100         i = ABR_SCHED_TABLE * iadev->memSize;
2101         writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE);
2102         i = ABR_WAIT_Q * iadev->memSize;
2103         writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE);
2104 
2105         i = ABR_SCHED_TABLE*iadev->memSize;
2106 	memset((caddr_t)(iadev->seg_ram+i),  0, iadev->num_vc*4);
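	/* Again for a 1K-VC board: 1024*4 = 4K bytes, covering the 2K ABR
	   table and the 2K ABR wait queue. */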
2107 	vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR;
2108 	evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR;
2109         iadev->testTable = kmalloc(sizeof(struct testTable_t *) * iadev->num_vc, GFP_KERNEL);
2110         if (!iadev->testTable) {
2111            printk(KERN_ERR DEV_LABEL " couldn't get mem\n");
2112 	   goto err_free_desc_tbl;
2113         }
2114 	for(i=0; i<iadev->num_vc; i++)
2115 	{
2116 		memset((caddr_t)vc, 0, sizeof(*vc));
2117 		memset((caddr_t)evc, 0, sizeof(*evc));
2118                 iadev->testTable[i] = kmalloc(sizeof(struct testTable_t),
2119 						GFP_KERNEL);
2120 		if (!iadev->testTable[i])
2121 			goto err_free_test_tables;
2122               	iadev->testTable[i]->lastTime = 0;
2123  		iadev->testTable[i]->fract = 0;
2124                 iadev->testTable[i]->vc_status = VC_UBR;
2125 		vc++;
2126 		evc++;
2127 	}
2128 
2129 	/* Other Initialization */
2130 
2131 	/* Max Rate Register */
2132         if (iadev->phy_type & FE_25MBIT_PHY) {
2133 	   writew(RATE25, iadev->seg_reg+MAXRATE);
2134 	   writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2135         }
2136         else {
2137 	   writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE);
2138 	   writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS);
2139         }
2140 	/* Set Idle Header Registers to be sure */
2141 	writew(0, iadev->seg_reg+IDLEHEADHI);
2142 	writew(0, iadev->seg_reg+IDLEHEADLO);
2143 
2144 	/* Program ABR UBR Priority Register  as  PRI_ABR_UBR_EQUAL */
2145         writew(0xaa00, iadev->seg_reg+ABRUBR_ARB);
2146 
2147         iadev->close_pending = 0;
2148         init_waitqueue_head(&iadev->close_wait);
2149         init_waitqueue_head(&iadev->timeout_wait);
2150 	skb_queue_head_init(&iadev->tx_dma_q);
2151 	ia_init_rtn_q(&iadev->tx_return_q);
2152 
2153 	/* RM Cell Protocol ID and Message Type */
2154 	writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE);
2155         skb_queue_head_init (&iadev->tx_backlog);
2156 
2157 	/* Mode Register 1 */
2158 	writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1);
2159 
2160 	/* Mode Register 0 */
2161 	writew(T_ONLINE, iadev->seg_reg+MODE_REG_0);
2162 
2163 	/* Interrupt Status Register - read to clear */
2164 	readw(iadev->seg_reg+SEG_INTR_STATUS_REG);
2165 
2166 	/* Interrupt Mask Reg- don't mask TCQ_NOT_EMPTY interrupt generation */
2167         writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG);
2168         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2169         iadev->tx_pkt_cnt = 0;
2170         iadev->rate_limit = iadev->LineRate / 3;
2171 
2172 	return 0;
2173 
2174 err_free_test_tables:
2175 	while (--i >= 0)
2176 		kfree(iadev->testTable[i]);
2177 	kfree(iadev->testTable);
2178 err_free_desc_tbl:
2179 	kfree(iadev->desc_tbl);
2180 err_free_all_tx_bufs:
2181 	i = iadev->num_tx_desc;
2182 err_free_tx_bufs:
2183 	while (--i >= 0) {
2184 		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2185 
2186 		pci_unmap_single(iadev->pci, desc->dma_addr,
2187 			sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2188 		kfree(desc->cpcs);
2189 	}
2190 	kfree(iadev->tx_buf);
2191 err_free_dle:
2192 	pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2193 			    iadev->tx_dle_dma);
2194 err_out:
2195 	return -ENOMEM;
2196 }
2197 
2198 static irqreturn_t ia_int(int irq, void *dev_id)
2199 {
2200    struct atm_dev *dev;
2201    IADEV *iadev;
2202    unsigned int status;
2203    int handled = 0;
2204 
2205    dev = dev_id;
2206    iadev = INPH_IA_DEV(dev);
2207    while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f))
2208    {
2209 	handled = 1;
2210         IF_EVENT(printk("ia_int: status = 0x%x\n", status);)
2211 	if (status & STAT_REASSINT)
2212 	{
2213 	   /* reassembly (receive side) interrupt */
2214 	   IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);)
2215 	   rx_intr(dev);
2216 	}
2217 	if (status & STAT_DLERINT)
2218 	{
2219 	   /* Clear this bit by writing a 1 to it. */
2220 	   *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT;
2221 	   rx_dle_intr(dev);
2222 	}
2223 	if (status & STAT_SEGINT)
2224 	{
2225 	   /* segmentation (transmit side) interrupt */
2226            IF_EVENT(printk("IA: tx_intr \n");)
2227 	   tx_intr(dev);
2228 	}
2229 	if (status & STAT_DLETINT)
2230 	{
2231 	   *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT;
2232 	   tx_dle_intr(dev);
2233 	}
2234 	if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT))
2235 	{
2236            if (status & STAT_FEINT)
2237                IaFrontEndIntr(iadev);
2238 	}
2239    }
2240    return IRQ_RETVAL(handled);
2241 }
2242 
2243 
2244 
2245 /*----------------------------- entries --------------------------------*/
2246 static int get_esi(struct atm_dev *dev)
2247 {
2248 	IADEV *iadev;
2249 	int i;
2250 	u32 mac1;
2251 	u16 mac2;
2252 
2253 	iadev = INPH_IA_DEV(dev);
2254 	mac1 = cpu_to_be32(le32_to_cpu(readl(
2255 				iadev->reg+IPHASE5575_MAC1)));
2256 	mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2)));
2257 	IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);)
2258 	for (i=0; i<MAC1_LEN; i++)
2259 		dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i));
2260 
2261 	for (i=0; i<MAC2_LEN; i++)
2262 		dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i));
2263 	return 0;
2264 }
2265 
2266 static int reset_sar(struct atm_dev *dev)
2267 {
2268 	IADEV *iadev;
2269 	int i, error = 1;
2270 	unsigned int pci[64];
2271 
2272 	iadev = INPH_IA_DEV(dev);
2273 	for(i=0; i<64; i++)
2274 	  if ((error = pci_read_config_dword(iadev->pci,
2275 				i*4, &pci[i])) != PCIBIOS_SUCCESSFUL)
2276   	      return error;
2277 	writel(0, iadev->reg+IPHASE5575_EXT_RESET);
2278 	for(i=0; i<64; i++)
2279 	  if ((error = pci_write_config_dword(iadev->pci,
2280 					i*4, pci[i])) != PCIBIOS_SUCCESSFUL)
2281 	    return error;
2282 	udelay(5);
2283 	return 0;
2284 }
2285 
2286 
2287 static int __devinit ia_init(struct atm_dev *dev)
2288 {
2289 	IADEV *iadev;
2290 	unsigned long real_base;
2291 	void __iomem *base;
2292 	unsigned short command;
2293 	int error, i;
2294 
2295 	/* The device has been identified and registered. Now we read
2296 	   necessary configuration info like memory base address,
2297 	   interrupt number etc */
2298 
2299 	IF_INIT(printk(">ia_init\n");)
2300 	dev->ci_range.vpi_bits = 0;
2301 	dev->ci_range.vci_bits = NR_VCI_LD;
2302 
2303 	iadev = INPH_IA_DEV(dev);
2304 	real_base = pci_resource_start (iadev->pci, 0);
2305 	iadev->irq = iadev->pci->irq;
2306 
2307 	error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command);
2308 	if (error) {
2309 		printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n",
2310 				dev->number,error);
2311 		return -EINVAL;
2312 	}
2313 	IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n",
2314 			dev->number, iadev->pci->revision, real_base, iadev->irq);)
2315 
2316 	/* find mapping size of board */
2317 
2318 	iadev->pci_map_size = pci_resource_len(iadev->pci, 0);
2319 
2320         if (iadev->pci_map_size == 0x100000){
2321           iadev->num_vc = 4096;
2322 	  dev->ci_range.vci_bits = NR_VCI_4K_LD;
2323           iadev->memSize = 4;
2324         }
2325         else if (iadev->pci_map_size == 0x40000) {
2326           iadev->num_vc = 1024;
2327           iadev->memSize = 1;
2328         }
2329         else {
2330            printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size);
2331            return -EINVAL;
2332         }
2333 	IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);)
2334 
2335 	/* enable bus mastering */
2336 	pci_set_master(iadev->pci);
2337 
2338 	/*
2339 	 * Delay at least 1us before doing any mem accesses (how 'bout 10?)
2340 	 */
2341 	udelay(10);
2342 
2343 	/* mapping the physical address to a virtual address in address space */
2344 	base = ioremap(real_base,iadev->pci_map_size);  /* ioremap is not resolved ??? */
2345 
2346 	if (!base)
2347 	{
2348 		printk(DEV_LABEL " (itf %d): can't set up page mapping\n",
2349 			    dev->number);
2350 		return -ENOMEM;
2351 	}
2352 	IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n",
2353 			dev->number, iadev->pci->revision, base, iadev->irq);)
2354 
2355 	/* filling the iphase dev structure */
2356 	iadev->mem = iadev->pci_map_size /2;
2357 	iadev->real_base = real_base;
2358 	iadev->base = base;
2359 
2360 	/* Bus Interface Control Registers */
2361 	iadev->reg = base + REG_BASE;
2362 	/* Segmentation Control Registers */
2363 	iadev->seg_reg = base + SEG_BASE;
2364 	/* Reassembly Control Registers */
2365 	iadev->reass_reg = base + REASS_BASE;
2366 	/* Front end/ DMA control registers */
2367 	iadev->phy = base + PHY_BASE;
2368 	iadev->dma = base + PHY_BASE;
2369 	/* RAM - Segmentation RAM and Reassembly RAM */
2370 	iadev->ram = base + ACTUAL_RAM_BASE;
2371 	iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE;
2372 	iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE;
2373 
2374 	/* let's print out the above */
2375 	IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n",
2376           iadev->reg,iadev->seg_reg,iadev->reass_reg,
2377           iadev->phy, iadev->ram, iadev->seg_ram,
2378           iadev->reass_ram);)
2379 
2380 	/* let's try reading the MAC address */
2381 	error = get_esi(dev);
2382 	if (error) {
2383 	  iounmap(iadev->base);
2384 	  return error;
2385 	}
2386         printk("IA: ");
2387 	for (i=0; i < ESI_LEN; i++)
2388                 printk("%s%02X",i ? "-" : "",dev->esi[i]);
2389         printk("\n");
2390 
2391         /* reset SAR */
2392         if (reset_sar(dev)) {
2393 	   iounmap(iadev->base);
2394            printk("IA: reset SAR failed, please try again\n");
2395            return 1;
2396         }
2397 	return 0;
2398 }
2399 
2400 static void ia_update_stats(IADEV *iadev) {
2401     if (!iadev->carrier_detect)
2402         return;
2403     iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff;
2404     iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16;
2405     iadev->drop_rxpkt +=  readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff;
2406     iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff;
2407     iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff;
2408     iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16;
2409     return;
2410 }
2411 
2412 static void ia_led_timer(unsigned long arg) {
2413  	unsigned long flags;
2414   	static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
2415         u_char i;
2416         static u32 ctrl_reg;
2417         for (i = 0; i < iadev_count; i++) {
2418            if (ia_dev[i]) {
2419 	      ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2420 	      if (blinking[i] == 0) {
2421 		 blinking[i]++;
2422                  ctrl_reg &= (~CTRL_LED);
2423                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2424                  ia_update_stats(ia_dev[i]);
2425               }
2426               else {
2427 		 blinking[i] = 0;
2428 		 ctrl_reg |= CTRL_LED;
2429                  writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG);
2430                  spin_lock_irqsave(&ia_dev[i]->tx_lock, flags);
2431                  if (ia_dev[i]->close_pending)
2432                     wake_up(&ia_dev[i]->close_wait);
2433                  ia_tx_poll(ia_dev[i]);
2434                  spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags);
2435               }
2436            }
2437         }
2438 	mod_timer(&ia_timer, jiffies + HZ / 4);
2439  	return;
2440 }
2441 
2442 static void ia_phy_put(struct atm_dev *dev, unsigned char value,
2443 	unsigned long addr)
2444 {
2445 	writel(value, INPH_IA_DEV(dev)->phy+addr);
2446 }
2447 
2448 static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr)
2449 {
2450 	return readl(INPH_IA_DEV(dev)->phy+addr);
2451 }
2452 
2453 static void ia_free_tx(IADEV *iadev)
2454 {
2455 	int i;
2456 
2457 	kfree(iadev->desc_tbl);
2458 	for (i = 0; i < iadev->num_vc; i++)
2459 		kfree(iadev->testTable[i]);
2460 	kfree(iadev->testTable);
2461 	for (i = 0; i < iadev->num_tx_desc; i++) {
2462 		struct cpcs_trailer_desc *desc = iadev->tx_buf + i;
2463 
2464 		pci_unmap_single(iadev->pci, desc->dma_addr,
2465 			sizeof(*desc->cpcs), PCI_DMA_TODEVICE);
2466 		kfree(desc->cpcs);
2467 	}
2468 	kfree(iadev->tx_buf);
2469 	pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start,
2470 			    iadev->tx_dle_dma);
2471 }
2472 
2473 static void ia_free_rx(IADEV *iadev)
2474 {
2475 	kfree(iadev->rx_open);
2476 	pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start,
2477 			  iadev->rx_dle_dma);
2478 }
2479 
2480 static int __devinit ia_start(struct atm_dev *dev)
2481 {
2482 	IADEV *iadev;
2483 	int error;
2484 	unsigned char phy;
2485 	u32 ctrl_reg;
2486 	IF_EVENT(printk(">ia_start\n");)
2487 	iadev = INPH_IA_DEV(dev);
2488         if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) {
2489                 printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n",
2490                     dev->number, iadev->irq);
2491 		error = -EAGAIN;
2492 		goto err_out;
2493         }
2494         /* @@@ should release IRQ on error */
2495 	/* enabling memory + master */
2496         if ((error = pci_write_config_word(iadev->pci,
2497 				PCI_COMMAND,
2498 				PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER )))
2499 	{
2500                 printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+"
2501                     "master (0x%x)\n",dev->number, error);
2502 		error = -EIO;
2503 		goto err_free_irq;
2504         }
2505 	udelay(10);
2506 
2507 	/* Maybe we should reset the front end, initialize Bus Interface Control
2508 		Registers and see. */
2509 
2510 	IF_INIT(printk("Bus ctrl reg: %08x\n",
2511                             readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2512 	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2513 	ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST))
2514 			| CTRL_B8
2515 			| CTRL_B16
2516 			| CTRL_B32
2517 			| CTRL_B48
2518 			| CTRL_B64
2519 			| CTRL_B128
2520 			| CTRL_ERRMASK
2521 			| CTRL_DLETMASK		/* should be removed later */
2522 			| CTRL_DLERMASK
2523 			| CTRL_SEGMASK
2524 			| CTRL_REASSMASK
2525 			| CTRL_FEMASK
2526 			| CTRL_CSPREEMPT;
2527 
2528        writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2529 
2530 	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2531                            readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));
2532 	   printk("Bus status reg after init: %08x\n",
2533                             readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));)
2534 
2535         ia_hw_type(iadev);
2536 	error = tx_init(dev);
2537 	if (error)
2538 		goto err_free_irq;
2539 	error = rx_init(dev);
2540 	if (error)
2541 		goto err_free_tx;
2542 
2543 	ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2544        	writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG);
2545 	IF_INIT(printk("Bus ctrl reg after initializing: %08x\n",
2546                                readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));)
2547         phy = 0; /* resolve compiler complaint */
2548         IF_INIT (
2549 	if ((phy=ia_phy_get(dev,0)) == 0x30)
2550 		printk("IA: pm5346,rev.%d\n",phy&0x0f);
2551 	else
2552 		printk("IA: utopia,rev.%0x\n",phy);)
2553 
2554 	if (iadev->phy_type &  FE_25MBIT_PHY)
2555            ia_mb25_init(iadev);
2556 	else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY))
2557            ia_suni_pm7345_init(iadev);
2558 	else {
2559 		error = suni_init(dev);
2560 		if (error)
2561 			goto err_free_rx;
2562 		/*
2563 		 * Enable interrupt on loss of signal
2564 		 * SUNI_RSOP_CIE - 0x10
2565 		 * SUNI_RSOP_CIE_LOSE - 0x04
2566 		 */
2567 		ia_phy_put(dev, ia_phy_get(dev, 0x10) | 0x04, 0x10);
2568 #ifndef MODULE
2569 		error = dev->phy->start(dev);
2570 		if (error)
2571 			goto err_free_rx;
2572 #endif
2573 		/* Get iadev->carrier_detect status */
2574 		IaFrontEndIntr(iadev);
2575 	}
2576 	return 0;
2577 
2578 err_free_rx:
2579 	ia_free_rx(iadev);
2580 err_free_tx:
2581 	ia_free_tx(iadev);
2582 err_free_irq:
2583 	free_irq(iadev->irq, dev);
2584 err_out:
2585 	return error;
2586 }
2587 
2588 static void ia_close(struct atm_vcc *vcc)
2589 {
2590 	DEFINE_WAIT(wait);
2591         u16 *vc_table;
2592         IADEV *iadev;
2593         struct ia_vcc *ia_vcc;
2594         struct sk_buff *skb = NULL;
2595         struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog;
2596         unsigned long closetime, flags;
2597 
2598         iadev = INPH_IA_DEV(vcc->dev);
2599         ia_vcc = INPH_IA_VCC(vcc);
2600 	if (!ia_vcc) return;
2601 
2602         IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d  vci = %d\n",
2603                                               ia_vcc->vc_desc_cnt,vcc->vci);)
2604 	clear_bit(ATM_VF_READY,&vcc->flags);
2605         skb_queue_head_init (&tmp_tx_backlog);
2606         skb_queue_head_init (&tmp_vcc_backlog);
2607         if (vcc->qos.txtp.traffic_class != ATM_NONE) {
2608            iadev->close_pending++;
2609 	   prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE);
2610 	   schedule_timeout(50);
2611 	   finish_wait(&iadev->timeout_wait, &wait);
2612            spin_lock_irqsave(&iadev->tx_lock, flags);
2613            while((skb = skb_dequeue(&iadev->tx_backlog))) {
2614               if (ATM_SKB(skb)->vcc == vcc){
2615                  if (vcc->pop) vcc->pop(vcc, skb);
2616                  else dev_kfree_skb_any(skb);
2617               }
2618               else
2619                  skb_queue_tail(&tmp_tx_backlog, skb);
2620            }
2621            while((skb = skb_dequeue(&tmp_tx_backlog)))
2622              skb_queue_tail(&iadev->tx_backlog, skb);
2623            IF_EVENT(printk("IA TX Done desc_cnt = %d\n", ia_vcc->vc_desc_cnt);)
2624            closetime = 300000 / ia_vcc->pcr;
2625            if (closetime == 0)
2626               closetime = 1;
2627            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2628            wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime);
2629            spin_lock_irqsave(&iadev->tx_lock, flags);
2630            iadev->close_pending--;
2631            iadev->testTable[vcc->vci]->lastTime = 0;
2632            iadev->testTable[vcc->vci]->fract = 0;
2633            iadev->testTable[vcc->vci]->vc_status = VC_UBR;
2634            if (vcc->qos.txtp.traffic_class == ATM_ABR) {
2635               if (vcc->qos.txtp.min_pcr > 0)
2636                  iadev->sum_mcr -= vcc->qos.txtp.min_pcr;
2637            }
2638            if (vcc->qos.txtp.traffic_class == ATM_CBR) {
2639               ia_vcc = INPH_IA_VCC(vcc);
2640               iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity;
2641               ia_cbrVc_close (vcc);
2642            }
2643            spin_unlock_irqrestore(&iadev->tx_lock, flags);
2644         }
2645 
2646         if (vcc->qos.rxtp.traffic_class != ATM_NONE) {
2647            // reset reass table
2648            vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize);
2649            vc_table += vcc->vci;
2650            *vc_table = NO_AAL5_PKT;
2651            // reset vc table
2652            vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize);
2653            vc_table += vcc->vci;
2654            *vc_table = (vcc->vci << 6) | 15;
2655            if (vcc->qos.rxtp.traffic_class == ATM_ABR) {
2656               struct abr_vc_table __iomem *abr_vc_table =
2657                                 (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize);
2658               abr_vc_table +=  vcc->vci;
2659               abr_vc_table->rdf = 0x0003;
2660               abr_vc_table->air = 0x5eb1;
2661            }
2662            // Drain the packets
2663            rx_dle_intr(vcc->dev);
2664            iadev->rx_open[vcc->vci] = NULL;
2665         }
2666 	kfree(INPH_IA_VCC(vcc));
2667         ia_vcc = NULL;
2668         vcc->dev_data = NULL;
2669         clear_bit(ATM_VF_ADDR,&vcc->flags);
2670         return;
2671 }
2672 
2673 static int ia_open(struct atm_vcc *vcc)
2674 {
2675 	IADEV *iadev;
2676 	struct ia_vcc *ia_vcc;
2677 	int error;
2678 	if (!test_bit(ATM_VF_PARTIAL,&vcc->flags))
2679 	{
2680 		IF_EVENT(printk("ia: not partially allocated resources\n");)
2681 		vcc->dev_data = NULL;
2682 	}
2683 	iadev = INPH_IA_DEV(vcc->dev);
2684 	if (vcc->vci != ATM_VCI_UNSPEC && vcc->vpi != ATM_VPI_UNSPEC)
2685 	{
2686 		IF_EVENT(printk("iphase open: unspec part\n");)
2687 		set_bit(ATM_VF_ADDR,&vcc->flags);
2688 	}
2689 	if (vcc->qos.aal != ATM_AAL5)
2690 		return -EINVAL;
2691 	IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n",
2692                                  vcc->dev->number, vcc->vpi, vcc->vci);)
2693 
2694 	/* Device dependent initialization */
2695 	ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL);
2696 	if (!ia_vcc) return -ENOMEM;
2697 	vcc->dev_data = ia_vcc;
2698 
2699 	if ((error = open_rx(vcc)))
2700 	{
2701 		IF_EVENT(printk("iadev: error in open_rx, closing\n");)
2702 		ia_close(vcc);
2703 		return error;
2704 	}
2705 
2706 	if ((error = open_tx(vcc)))
2707 	{
2708 		IF_EVENT(printk("iadev: error in open_tx, closing\n");)
2709 		ia_close(vcc);
2710 		return error;
2711 	}
2712 
2713 	set_bit(ATM_VF_READY,&vcc->flags);
2714 
2715 #if 0
2716         {
2717            static u8 first = 1;
2718            if (first) {
2719               ia_timer.expires = jiffies + 3*HZ;
2720               add_timer(&ia_timer);
2721               first = 0;
2722            }
2723         }
2724 #endif
2725 	IF_EVENT(printk("ia open returning\n");)
2726 	return 0;
2727 }
2728 
2729 static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
2730 {
2731 	IF_EVENT(printk(">ia_change_qos\n");)
2732 	return 0;
2733 }
2734 
2735 static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
2736 {
2737    IA_CMDBUF ia_cmds;
2738    IADEV *iadev;
2739    int i, board;
2740    u16 __user *tmps;
2741    IF_EVENT(printk(">ia_ioctl\n");)
2742    if (cmd != IA_CMD) {
2743       if (!dev->phy->ioctl) return -EINVAL;
2744       return dev->phy->ioctl(dev,cmd,arg);
2745    }
2746    if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT;
2747    board = ia_cmds.status;
2748    if ((board < 0) || (board >= iadev_count))
2749          board = 0;
2750    iadev = ia_dev[board];
2751    switch (ia_cmds.cmd) {
2752    case MEMDUMP:
2753    {
2754 	switch (ia_cmds.sub_cmd) {
2755        	  case MEMDUMP_DEV:
2756 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2757 	     if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV)))
2758                 return -EFAULT;
2759              ia_cmds.status = 0;
2760              break;
2761           case MEMDUMP_SEGREG:
2762 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2763              tmps = (u16 __user *)ia_cmds.buf;
2764              for(i=0; i<0x80; i+=2, tmps++)
2765                 if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT;
2766              ia_cmds.status = 0;
2767              ia_cmds.len = 0x80;
2768              break;
2769           case MEMDUMP_REASSREG:
2770 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2771              tmps = (u16 __user *)ia_cmds.buf;
2772              for(i=0; i<0x80; i+=2, tmps++)
2773                 if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT;
2774              ia_cmds.status = 0;
2775              ia_cmds.len = 0x80;
2776              break;
2777           case MEMDUMP_FFL:
2778           {
2779              ia_regs_t       *regs_local;
2780              ffredn_t        *ffL;
2781              rfredn_t        *rfL;
2782 
2783 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2784 	     regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL);
2785 	     if (!regs_local) return -ENOMEM;
2786 	     ffL = &regs_local->ffredn;
2787 	     rfL = &regs_local->rfredn;
2788              /* Copy real rfred registers into the local copy */
2789  	     for (i=0; i<(sizeof (rfredn_t))/4; i++)
2790                 ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff;
2791              	/* Copy real ffred registers into the local copy */
2792 	     for (i=0; i<(sizeof (ffredn_t))/4; i++)
2793                 ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff;
2794 
2795              if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) {
2796                 kfree(regs_local);
2797                 return -EFAULT;
2798              }
2799              kfree(regs_local);
2800              printk("Board %d registers dumped\n", board);
2801              ia_cmds.status = 0;
2802 	 }
2803     	     break;
2804          case READ_REG:
2805          {
2806 	     if (!capable(CAP_NET_ADMIN)) return -EPERM;
2807              desc_dbg(iadev);
2808              ia_cmds.status = 0;
2809          }
2810              break;
2811          case 0x6:
2812          {
2813              ia_cmds.status = 0;
2814              printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog));
2815              printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q));
2816          }
2817              break;
2818          case 0x8:
2819          {
2820              struct k_sonet_stats *stats;
2821              stats = &PRIV(_ia_dev[board])->sonet_stats;
2822              printk("section_bip: %d\n", atomic_read(&stats->section_bip));
2823              printk("line_bip   : %d\n", atomic_read(&stats->line_bip));
2824              printk("path_bip   : %d\n", atomic_read(&stats->path_bip));
2825              printk("line_febe  : %d\n", atomic_read(&stats->line_febe));
2826              printk("path_febe  : %d\n", atomic_read(&stats->path_febe));
2827              printk("corr_hcs   : %d\n", atomic_read(&stats->corr_hcs));
2828              printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
2829              printk("tx_cells   : %d\n", atomic_read(&stats->tx_cells));
2830              printk("rx_cells   : %d\n", atomic_read(&stats->rx_cells));
2831          }
2832             ia_cmds.status = 0;
2833             break;
2834          case 0x9:
2835 	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2836             for (i = 1; i <= iadev->num_rx_desc; i++)
2837                free_desc(_ia_dev[board], i);
2838             writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD),
2839                                             iadev->reass_reg+REASS_MASK_REG);
2840             iadev->rxing = 1;
2841 
2842             ia_cmds.status = 0;
2843             break;
2844 
2845          case 0xb:
2846 	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2847             IaFrontEndIntr(iadev);
2848             break;
2849          case 0xa:
2850 	    if (!capable(CAP_NET_ADMIN)) return -EPERM;
2851          {
2852              ia_cmds.status = 0;
2853              IADebugFlag = ia_cmds.maddr;
2854              printk("New debug option loaded\n");
2855          }
2856              break;
2857          default:
2858              ia_cmds.status = 0;
2859              break;
2860       }
2861    }
2862       break;
2863    default:
2864       break;
2865 
2866    }
2867    return 0;
2868 }
2869 
2870 static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
2871 	void __user *optval, int optlen)
2872 {
2873 	IF_EVENT(printk(">ia_getsockopt\n");)
2874 	return -EINVAL;
2875 }
2876 
2877 static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
2878 	void __user *optval, int optlen)
2879 {
2880 	IF_EVENT(printk(">ia_setsockopt\n");)
2881 	return -EINVAL;
2882 }
2883 
2884 static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
2885         IADEV *iadev;
2886         struct dle *wr_ptr;
2887         struct tx_buf_desc __iomem *buf_desc_ptr;
2888         int desc;
2889         int comp_code;
2890         int total_len;
2891         struct cpcs_trailer *trailer;
2892         struct ia_vcc *iavcc;
2893 
2894         iadev = INPH_IA_DEV(vcc->dev);
2895         iavcc = INPH_IA_VCC(vcc);
2896         if (!iavcc->txing) {
2897            printk("discard packet on closed VC\n");
2898            if (vcc->pop)
2899 		vcc->pop(vcc, skb);
2900            else
2901 		dev_kfree_skb_any(skb);
2902 	   return 0;
2903         }
2904 
2905         if (skb->len > iadev->tx_buf_sz - 8) {	/* 8 == sizeof(struct cpcs_trailer) */
2906            printk("Transmit size over tx buffer size\n");
2907            if (vcc->pop)
2908                  vcc->pop(vcc, skb);
2909            else
2910                  dev_kfree_skb_any(skb);
2911           return 0;
2912         }
2913         if ((u32)skb->data & 3) {
2914            printk("Misaligned SKB\n");
2915            if (vcc->pop)
2916                  vcc->pop(vcc, skb);
2917            else
2918                  dev_kfree_skb_any(skb);
2919            return 0;
2920         }
2921 	/* Get a descriptor number from our free descriptor queue
2922 	   We get the descr number from the TCQ now, since I am using
2923 	   the TCQ as a free buffer queue. Initially TCQ will be
2924 	   initialized with all the descriptors and is hence, full.
2925 	*/
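	/* get_desc() hands back the TCQ entry: the low 13 bits are the
	   descriptor number, the upper three bits the completion code,
	   which is why the value is split below. */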
2926 	desc = get_desc (iadev, iavcc);
2927 	if (desc == 0xffff)
2928 	    return 1;
2929 	comp_code = desc >> 13;
2930 	desc &= 0x1fff;
2931 
2932 	if ((desc == 0) || (desc > iadev->num_tx_desc))
2933 	{
2934 		IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
2935                 atomic_inc(&vcc->stats->tx);
2936 		if (vcc->pop)
2937 		    vcc->pop(vcc, skb);
2938 		else
2939 		    dev_kfree_skb_any(skb);
2940 		return 0;   /* return SUCCESS */
2941 	}
2942 
2943 	if (comp_code)
2944 	{
2945 	    IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n",
2946                                                             desc, comp_code);)
2947 	}
2948 
2949         /* remember the desc and vcc mapping */
2950         iavcc->vc_desc_cnt++;
2951         iadev->desc_tbl[desc-1].iavcc = iavcc;
2952         iadev->desc_tbl[desc-1].txskb = skb;
2953         IA_SKB_STATE(skb) = 0;
2954 
2955         iadev->ffL.tcq_rd += 2;
2956         if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed)
2957 	  	iadev->ffL.tcq_rd  = iadev->ffL.tcq_st;
2958 	writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR);
2959 
2960 	/* Put the descriptor number in the packet ready queue
2961 		and put the updated write pointer in the DLE field
2962 	*/
2963 	*(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc;
2964 
2965  	iadev->ffL.prq_wr += 2;
2966         if (iadev->ffL.prq_wr > iadev->ffL.prq_ed)
2967                 iadev->ffL.prq_wr = iadev->ffL.prq_st;
2968 
2969 	/* Figure out the exact length of the packet and padding required to
2970            make it  aligned on a 48 byte boundary.  */
2971 	total_len = skb->len + sizeof(struct cpcs_trailer);
2972 	total_len = ((total_len + 47) / 48) * 48;
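	/* e.g. a 100 byte skb: 100 + 8 byte trailer = 108, rounded up to
	   144 bytes (three 48 byte cell payloads). */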
2973 	IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);)
2974 
2975 	/* Put the packet in a tx buffer */
2976 	trailer = iadev->tx_buf[desc-1].cpcs;
2977         IF_TX(printk("Sent: skb = 0x%x skb->data: 0x%x len: %d, desc: %d\n",
2978                   (u32)skb, (u32)skb->data, skb->len, desc);)
2979 	trailer->control = 0;
2980         /*big endian*/
2981 	trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8);
2982 	trailer->crc32 = 0;	/* not needed - dummy bytes */
2983 
2984 	/* Display the packet */
2985 	IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n",
2986                                                         skb->len, tcnter++);
2987         xdump(skb->data, skb->len, "TX: ");
2988         printk("\n");)
2989 
2990 	/* Build the buffer descriptor */
2991 	buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE;
2992 	buf_desc_ptr += desc;	/* points to the corresponding entry */
2993 	buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT;
2994 	/* Huh ? p.115 of users guide describes this as a read-only register */
2995         writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG);
2996 	buf_desc_ptr->vc_index = vcc->vci;
2997 	buf_desc_ptr->bytes = total_len;
2998 
2999         if (vcc->qos.txtp.traffic_class == ATM_ABR)
3000 	   clear_lockup (vcc, iadev);
3001 
3002 	/* Build the DLE structure */
3003 	wr_ptr = iadev->tx_dle_q.write;
3004 	memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr));
3005 	wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data,
3006 		skb->len, PCI_DMA_TODEVICE);
3007 	wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) |
3008                                                   buf_desc_ptr->buf_start_lo;
3009 	/* wr_ptr->bytes = swap(total_len);	didn't seem to affect ?? */
3010 	wr_ptr->bytes = skb->len;
3011 
3012         /* hw bug - DLE byte counts of 0x2d, 0x2e, 0x2f cause DMA lockup; the check below rounds 0x2c-0x2f up to 0x30 */
3013         if ((wr_ptr->bytes >> 2) == 0xb)
3014            wr_ptr->bytes = 0x30;
3015 
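	/* Payload DLE: the PRQ write-pointer update is deferred to the trailer
	   DLE built below, so prq_wr_ptr_data stays zero here. */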
3016 	wr_ptr->mode = TX_DLE_PSI;
3017 	wr_ptr->prq_wr_ptr_data = 0;
3018 
3019 	/* end is not to be used for the DLE q */
3020 	if (++wr_ptr == iadev->tx_dle_q.end)
3021 		wr_ptr = iadev->tx_dle_q.start;
3022 
3023         /* Build trailer dle */
3024         wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr;
3025         wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) |
3026           buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer);
3027 
3028         wr_ptr->bytes = sizeof(struct cpcs_trailer);
3029         wr_ptr->mode = DMA_INT_ENABLE;
3030         wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr;
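        /* The trailer DLE requests a DMA-complete interrupt and carries the
           updated PRQ write pointer recorded above. */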
3031 
3032         /* end is not to be used for the DLE q */
3033         if (++wr_ptr == iadev->tx_dle_q.end)
3034                 wr_ptr = iadev->tx_dle_q.start;
3035 
3036 	iadev->tx_dle_q.write = wr_ptr;
3037         ATM_DESC(skb) = vcc->vci;
3038         skb_queue_tail(&iadev->tx_dma_q, skb);
3039 
3040         atomic_inc(&vcc->stats->tx);
3041         iadev->tx_pkt_cnt++;
3042 	/* Increment transaction counter */
3043 	writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
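	/* (two DLEs - payload and CPCS trailer - were queued for this packet,
	   hence the count of 2) */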
3044 
3045 #if 0
3046         /* add flow control logic */
3047         if (atomic_read(&vcc->stats->tx) % 20 == 0) {
3048           if (iavcc->vc_desc_cnt > 10) {
3049              vcc->tx_quota =  vcc->tx_quota * 3 / 4;
3050             printk("Tx1:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3051               iavcc->flow_inc = -1;
3052               iavcc->saved_tx_quota = vcc->tx_quota;
3053            } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) {
3054              // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4;
3055              printk("Tx2:  vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
3056               iavcc->flow_inc = 0;
3057            }
3058         }
3059 #endif
3060 	IF_TX(printk("ia send done\n");)
3061 	return 0;
3062 }
3063 
3064 static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb)
3065 {
3066         IADEV *iadev;
3067         struct ia_vcc *iavcc;
3068         unsigned long flags;
3069 
3070         iadev = INPH_IA_DEV(vcc->dev);
3071         iavcc = INPH_IA_VCC(vcc);
3072         if (!skb || skb->len > (iadev->tx_buf_sz - sizeof(struct cpcs_trailer)))
3073         {
3074             if (!skb)
3075                 printk(KERN_CRIT "null skb in ia_send\n");
3076             else dev_kfree_skb_any(skb);
3077             return -EINVAL;
3078         }
3079         spin_lock_irqsave(&iadev->tx_lock, flags);
3080         if (!test_bit(ATM_VF_READY,&vcc->flags)){
3081             dev_kfree_skb_any(skb);
3082             spin_unlock_irqrestore(&iadev->tx_lock, flags);
3083             return -EINVAL;
3084         }
3085         ATM_SKB(skb)->vcc = vcc;
3086 
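        /* Preserve packet ordering: if a backlog already exists, queue behind
           it; otherwise try to send now and fall back to the backlog when
           ia_pkt_tx() cannot get a free Tx descriptor. */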
3087         if (skb_peek(&iadev->tx_backlog)) {
3088            skb_queue_tail(&iadev->tx_backlog, skb);
3089         }
3090         else {
3091            if (ia_pkt_tx (vcc, skb)) {
3092               skb_queue_tail(&iadev->tx_backlog, skb);
3093            }
3094         }
3095         spin_unlock_irqrestore(&iadev->tx_lock, flags);
3096         return 0;
3097 
3098 }
3099 
3100 static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
3101 {
3102   int   left = *pos, n;
3103   char  *tmpPtr;
3104   IADEV *iadev = INPH_IA_DEV(dev);
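  /* The proc read callback is called repeatedly with an increasing *pos:
     position 0 prints the board description, position 1 the counters, and
     any later position ends the output by returning 0. */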
3105   if(!left--) {
3106      if (iadev->phy_type == FE_25MBIT_PHY) {
3107        n = sprintf(page, "  Board Type         :  Iphase5525-1KVC-128K\n");
3108        return n;
3109      }
3110      if (iadev->phy_type == FE_DS3_PHY)
3111         n = sprintf(page, "  Board Type         :  Iphase-ATM-DS3");
3112      else if (iadev->phy_type == FE_E3_PHY)
3113         n = sprintf(page, "  Board Type         :  Iphase-ATM-E3");
3114      else if (iadev->phy_type == FE_UTP_OPTION)
3115          n = sprintf(page, "  Board Type         :  Iphase-ATM-UTP155");
3116      else
3117         n = sprintf(page, "  Board Type         :  Iphase-ATM-OC3");
3118      tmpPtr = page + n;
3119      if (iadev->pci_map_size == 0x40000)
3120         n += sprintf(tmpPtr, "-1KVC-");
3121      else
3122         n += sprintf(tmpPtr, "-4KVC-");
3123      tmpPtr = page + n;
3124      if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M)
3125         n += sprintf(tmpPtr, "1M  \n");
3126      else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K)
3127         n += sprintf(tmpPtr, "512K\n");
3128      else
3129        n += sprintf(tmpPtr, "128K\n");
3130      return n;
3131   }
3132   if (!left) {
3133      return  sprintf(page, "  Number of Tx Buffer:  %u\n"
3134                            "  Size of Tx Buffer  :  %u\n"
3135                            "  Number of Rx Buffer:  %u\n"
3136                            "  Size of Rx Buffer  :  %u\n"
3137                            "  Packets Received   :  %u\n"
3138                            "  Packets Transmitted:  %u\n"
3139                            "  Cells Received     :  %u\n"
3140                            "  Cells Transmitted  :  %u\n"
3141                            "  Board Dropped Cells:  %u\n"
3142                            "  Board Dropped Pkts :  %u\n",
3143                            iadev->num_tx_desc,  iadev->tx_buf_sz,
3144                            iadev->num_rx_desc,  iadev->rx_buf_sz,
3145                            iadev->rx_pkt_cnt,   iadev->tx_pkt_cnt,
3146                            iadev->rx_cell_cnt, iadev->tx_cell_cnt,
3147                            iadev->drop_rxcell, iadev->drop_rxpkt);
3148   }
3149   return 0;
3150 }
3151 
3152 static const struct atmdev_ops ops = {
3153 	.open		= ia_open,
3154 	.close		= ia_close,
3155 	.ioctl		= ia_ioctl,
3156 	.getsockopt	= ia_getsockopt,
3157 	.setsockopt	= ia_setsockopt,
3158 	.send		= ia_send,
3159 	.phy_put	= ia_phy_put,
3160 	.phy_get	= ia_phy_get,
3161 	.change_qos	= ia_change_qos,
3162 	.proc_read	= ia_proc_read,
3163 	.owner		= THIS_MODULE,
3164 };
3165 
3166 static int __devinit ia_init_one(struct pci_dev *pdev,
3167 				 const struct pci_device_id *ent)
3168 {
3169 	struct atm_dev *dev;
3170 	IADEV *iadev;
3171         unsigned long flags;
3172 	int ret;
3173 
3174 	iadev = kzalloc(sizeof(*iadev), GFP_KERNEL);
3175 	if (!iadev) {
3176 		ret = -ENOMEM;
3177 		goto err_out;
3178 	}
3179 
3180 	iadev->pci = pdev;
3181 
3182 	IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n",
3183 		pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));)
3184 	if (pci_enable_device(pdev)) {
3185 		ret = -ENODEV;
3186 		goto err_out_free_iadev;
3187 	}
3188 	dev = atm_dev_register(DEV_LABEL, &ops, -1, NULL);
3189 	if (!dev) {
3190 		ret = -ENOMEM;
3191 		goto err_out_disable_dev;
3192 	}
3193 	dev->dev_data = iadev;
3194 	IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);)
3195 	IF_INIT(printk("dev_id = 0x%x iadev->LineRate = %d \n", (u32)dev,
3196 		iadev->LineRate);)
3197 
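	/* Record the new board in the module-global tables; the failure path
	   below releases the same slot again. */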
3198 	ia_dev[iadev_count] = iadev;
3199 	_ia_dev[iadev_count] = dev;
3200 	iadev_count++;
3201 	spin_lock_init(&iadev->misc_lock);
3202 	/* First fixes first. I don't want to think about this now. */
3203 	spin_lock_irqsave(&iadev->misc_lock, flags);
3204 	if (ia_init(dev) || ia_start(dev)) {
3205 		IF_INIT(printk("IA register failed!\n");)
3206 		iadev_count--;
3207 		ia_dev[iadev_count] = NULL;
3208 		_ia_dev[iadev_count] = NULL;
3209 		spin_unlock_irqrestore(&iadev->misc_lock, flags);
3210 		ret = -EINVAL;
3211 		goto err_out_deregister_dev;
3212 	}
3213 	spin_unlock_irqrestore(&iadev->misc_lock, flags);
3214 	IF_EVENT(printk("iadev_count = %d\n", iadev_count);)
3215 
3216 	iadev->next_board = ia_boards;
3217 	ia_boards = dev;
3218 
3219 	pci_set_drvdata(pdev, dev);
3220 
3221 	return 0;
3222 
3223 err_out_deregister_dev:
3224 	atm_dev_deregister(dev);
3225 err_out_disable_dev:
3226 	pci_disable_device(pdev);
3227 err_out_free_iadev:
3228 	kfree(iadev);
3229 err_out:
3230 	return ret;
3231 }
3232 
3233 static void __devexit ia_remove_one(struct pci_dev *pdev)
3234 {
3235 	struct atm_dev *dev = pci_get_drvdata(pdev);
3236 	IADEV *iadev = INPH_IA_DEV(dev);
3237 
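	/* Clear bit 2 of PHY register 0x10 before tear-down - presumably this
	   quiesces the PHY; the exact bit meaning is board/PHY specific. */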
3238 	ia_phy_put(dev, ia_phy_get(dev,0x10) & ~(0x4), 0x10);
3239 	udelay(1);
3240 
3241 	/* De-register device */
3242       	free_irq(iadev->irq, dev);
3243 	iadev_count--;
3244 	ia_dev[iadev_count] = NULL;
3245 	_ia_dev[iadev_count] = NULL;
3246 	IF_EVENT(printk("deregistering iadev at (itf:%d)\n", dev->number);)
3247 	atm_dev_deregister(dev);
3248 
3249       	iounmap(iadev->base);
3250 	pci_disable_device(pdev);
3251 
3252 	ia_free_rx(iadev);
3253 	ia_free_tx(iadev);
3254 
3255       	kfree(iadev);
3256 }
3257 
3258 static struct pci_device_id ia_pci_tbl[] = {
3259 	{ PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, },
3260 	{ PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, },
3261 	{ 0,}
3262 };
3263 MODULE_DEVICE_TABLE(pci, ia_pci_tbl);
3264 
3265 static struct pci_driver ia_driver = {
3266 	.name =         DEV_LABEL,
3267 	.id_table =     ia_pci_tbl,
3268 	.probe =        ia_init_one,
3269 	.remove =       __devexit_p(ia_remove_one),
3270 };
3271 
3272 static int __init ia_module_init(void)
3273 {
3274 	int ret;
3275 
3276 	ret = pci_register_driver(&ia_driver);
3277 	if (ret >= 0) {
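		/* Arm ia_timer (set up earlier in this driver) for its first
		   run three seconds after the driver loads. */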
3278 		ia_timer.expires = jiffies + 3*HZ;
3279 		add_timer(&ia_timer);
3280 	} else
3281 		printk(KERN_ERR DEV_LABEL ": PCI driver registration failed\n");
3282 	return ret;
3283 }
3284 
3285 static void __exit ia_module_exit(void)
3286 {
3287 	pci_unregister_driver(&ia_driver);
3288 
3289         del_timer(&ia_timer);
3290 }
3291 
3292 module_init(ia_module_init);
3293 module_exit(ia_module_exit);
3294