xref: /openbmc/qemu/hw/net/fsl_etsec/rings.c (revision ad30c0b0)
1 /*
2  * QEMU Freescale eTSEC Emulator
3  *
4  * Copyright (c) 2011-2013 AdaCore
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a copy
7  * of this software and associated documentation files (the "Software"), to deal
8  * in the Software without restriction, including without limitation the rights
9  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10  * copies of the Software, and to permit persons to whom the Software is
11  * furnished to do so, subject to the following conditions:
12  *
13  * The above copyright notice and this permission notice shall be included in
14  * all copies or substantial portions of the Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22  * THE SOFTWARE.
23  */
24 #include "net/checksum.h"
25 
26 #include "etsec.h"
27 #include "registers.h"
28 
29 /* #define ETSEC_RING_DEBUG */
30 /* #define HEX_DUMP */
31 /* #define DEBUG_BD */
32 
33 #ifdef ETSEC_RING_DEBUG
34 static const int debug_etsec = 1;
35 #else
36 static const int debug_etsec;
37 #endif
38 
39 #define RING_DEBUG(fmt, ...) do {              \
40  if (debug_etsec) {                            \
41         qemu_log(fmt , ## __VA_ARGS__);        \
42     }                                          \
43     } while (0)
44 
45 #ifdef DEBUG_BD
46 
47 static void print_tx_bd_flags(uint16_t flags)
48 {
49     qemu_log("      Ready: %d\n", !!(flags & BD_TX_READY));
50     qemu_log("      PAD/CRC: %d\n", !!(flags & BD_TX_PADCRC));
51     qemu_log("      Wrap: %d\n", !!(flags & BD_WRAP));
52     qemu_log("      Interrupt: %d\n", !!(flags & BD_INTERRUPT));
53     qemu_log("      Last in frame: %d\n", !!(flags & BD_LAST));
54     qemu_log("      Tx CRC: %d\n", !!(flags & BD_TX_TC));
55     qemu_log("      User-defined preamble / defer: %d\n",
56            !!(flags & BD_TX_PREDEF));
57     qemu_log("      Huge frame enable / Late collision: %d\n",
58            !!(flags & BD_TX_HFELC));
59     qemu_log("      Control frame / Retransmission Limit: %d\n",
60            !!(flags & BD_TX_CFRL));
61     qemu_log("      Retry count: %d\n",
62            (flags >> BD_TX_RC_OFFSET) & BD_TX_RC_MASK);
63     qemu_log("      Underrun / TCP/IP off-load enable: %d\n",
64            !!(flags & BD_TX_TOEUN));
65     qemu_log("      Truncation: %d\n", !!(flags & BD_TX_TR));
66 }
67 
68 static void print_rx_bd_flags(uint16_t flags)
69 {
70     qemu_log("      Empty: %d\n", !!(flags & BD_RX_EMPTY));
71     qemu_log("      Receive software ownership: %d\n", !!(flags & BD_RX_RO1));
72     qemu_log("      Wrap: %d\n", !!(flags & BD_WRAP));
73     qemu_log("      Interrupt: %d\n", !!(flags & BD_INTERRUPT));
74     qemu_log("      Last in frame: %d\n", !!(flags & BD_LAST));
75     qemu_log("      First in frame: %d\n", !!(flags & BD_RX_FIRST));
76     qemu_log("      Miss: %d\n", !!(flags & BD_RX_MISS));
77     qemu_log("      Broadcast: %d\n", !!(flags & BD_RX_BROADCAST));
78     qemu_log("      Multicast: %d\n", !!(flags & BD_RX_MULTICAST));
79     qemu_log("      Rx frame length violation: %d\n", !!(flags & BD_RX_LG));
80     qemu_log("      Rx non-octet aligned frame: %d\n", !!(flags & BD_RX_NO));
81     qemu_log("      Short frame: %d\n", !!(flags & BD_RX_SH));
82     qemu_log("      Rx CRC Error: %d\n", !!(flags & BD_RX_CR));
83     qemu_log("      Overrun: %d\n", !!(flags & BD_RX_OV));
84     qemu_log("      Truncation: %d\n", !!(flags & BD_RX_TR));
85 }
86 
87 
88 static void print_bd(eTSEC_rxtx_bd bd, int mode, uint32_t index)
89 {
90     qemu_log("eTSEC %s Data Buffer Descriptor (%u)\n",
91            mode == eTSEC_TRANSMIT ? "Transmit" : "Receive",
92            index);
93     qemu_log("   Flags   : 0x%04x\n", bd.flags);
94     if (mode == eTSEC_TRANSMIT) {
95         print_tx_bd_flags(bd.flags);
96     } else {
97         print_rx_bd_flags(bd.flags);
98     }
99     qemu_log("   Length  : 0x%04x\n", bd.length);
100     qemu_log("   Pointer : 0x%08x\n", bd.bufptr);
101 }
102 
103 #endif  /* DEBUG_BD */
104 
105 static void read_buffer_descriptor(eTSEC         *etsec,
106                                    hwaddr         addr,
107                                    eTSEC_rxtx_bd *bd)
108 {
109     assert(bd != NULL);
110 
111     RING_DEBUG("READ Buffer Descriptor @ 0x" TARGET_FMT_plx"\n", addr);
112     cpu_physical_memory_read(addr,
113                              bd,
114                              sizeof(eTSEC_rxtx_bd));
115 
116     if (etsec->regs[DMACTRL].value & DMACTRL_LE) {
117         bd->flags  = lduw_le_p(&bd->flags);
118         bd->length = lduw_le_p(&bd->length);
119         bd->bufptr = ldl_le_p(&bd->bufptr);
120     } else {
121         bd->flags  = lduw_be_p(&bd->flags);
122         bd->length = lduw_be_p(&bd->length);
123         bd->bufptr = ldl_be_p(&bd->bufptr);
124     }
125 }
126 
127 static void write_buffer_descriptor(eTSEC         *etsec,
128                                     hwaddr         addr,
129                                     eTSEC_rxtx_bd *bd)
130 {
131     assert(bd != NULL);
132 
133     if (etsec->regs[DMACTRL].value & DMACTRL_LE) {
134         stw_le_p(&bd->flags, bd->flags);
135         stw_le_p(&bd->length, bd->length);
136         stl_le_p(&bd->bufptr, bd->bufptr);
137     } else {
138         stw_be_p(&bd->flags, bd->flags);
139         stw_be_p(&bd->length, bd->length);
140         stl_be_p(&bd->bufptr, bd->bufptr);
141     }
142 
143     RING_DEBUG("Write Buffer Descriptor @ 0x" TARGET_FMT_plx"\n", addr);
144     cpu_physical_memory_write(addr,
145                               bd,
146                               sizeof(eTSEC_rxtx_bd));
147 }
148 
149 static void ievent_set(eTSEC    *etsec,
150                        uint32_t  flags)
151 {
152     etsec->regs[IEVENT].value |= flags;
153 
154     if ((flags & IEVENT_TXB && etsec->regs[IMASK].value & IMASK_TXBEN)
155         || (flags & IEVENT_TXF && etsec->regs[IMASK].value & IMASK_TXFEN)) {
156         qemu_irq_raise(etsec->tx_irq);
157         RING_DEBUG("%s Raise Tx IRQ\n", __func__);
158     }
159 
160     if ((flags & IEVENT_RXB && etsec->regs[IMASK].value & IMASK_RXBEN)
161         || (flags & IEVENT_RXF && etsec->regs[IMASK].value & IMASK_RXFEN)) {
162         qemu_irq_raise(etsec->rx_irq);
163         RING_DEBUG("%s Raise Rx IRQ\n", __func__);
164     }
165 }
166 
167 static void tx_padding_and_crc(eTSEC *etsec, uint32_t min_frame_len)
168 {
169     int add = min_frame_len - etsec->tx_buffer_len;
170 
171     /* Padding */
172     if (add > 0) {
173         RING_DEBUG("pad:%u\n", add);
174         etsec->tx_buffer = g_realloc(etsec->tx_buffer,
175                                         etsec->tx_buffer_len + add);
176 
177         memset(etsec->tx_buffer + etsec->tx_buffer_len, 0x0, add);
178         etsec->tx_buffer_len += add;
179     }
180 
181     /* Never add CRC in QEMU */
182 }
183 
/* Apply the checksum off-loading requested by the Tx Frame Control Block.
 *
 * The 8-byte FCB sits at the very start of etsec->tx_buffer; the Ethernet
 * frame proper begins at offset 8 (hence the "+ 8" on every buffer access
 * below).  Byte 0 of the FCB holds the flag bits, byte 2 the L4 header
 * offset and byte 3 the L3 header offset, per the eTSEC FCB layout.
 */
static void process_tx_fcb(eTSEC *etsec)
{
    uint8_t flags = (uint8_t)(*etsec->tx_buffer);
    /* L3 header offset from start of frame */
    uint8_t l3_header_offset = (uint8_t)*(etsec->tx_buffer + 3);
    /* L4 header offset from start of L3 header */
    uint8_t l4_header_offset = (uint8_t)*(etsec->tx_buffer + 2);
    /* L3 header */
    uint8_t *l3_header = etsec->tx_buffer + 8 + l3_header_offset;
    /* L4 header (only used when zeroing the UDP checksum field below) */
    uint8_t *l4_header = l3_header + l4_header_offset;

    /* if packet is IP4 and IP checksum is requested */
    if (flags & FCB_TX_IP && flags & FCB_TX_CIP) {
        /* do IP4 checksum (TODO This function does TCP/UDP checksum
         * but not sure if it also does IP4 checksum.) */
        net_checksum_calculate(etsec->tx_buffer + 8,
                etsec->tx_buffer_len - 8);
    }
    /* TODO Check the correct usage of the PHCS field of the FCB in case the NPH
     * flag is on */

    /* if packet is IP4 and TCP or UDP */
    if (flags & FCB_TX_IP && flags & FCB_TX_TUP) {
        /* if UDP */
        if (flags & FCB_TX_UDP) {
            /* if checksum is requested */
            if (flags & FCB_TX_CTU) {
                /* do UDP checksum */

                net_checksum_calculate(etsec->tx_buffer + 8,
                        etsec->tx_buffer_len - 8);
            } else {
                /* set checksum field to 0 (bytes 6-7 of the UDP header) */
                l4_header[6] = 0;
                l4_header[7] = 0;
            }
        } else if (flags & FCB_TX_CTU) { /* if TCP and checksum is requested */
            /* do TCP checksum */
            net_checksum_calculate(etsec->tx_buffer + 8,
                                   etsec->tx_buffer_len - 8);
        }
    }
}
228 
/* Process one ready Tx buffer descriptor: append its data buffer to the
 * frame being accumulated in etsec->tx_buffer and, when this BD is the last
 * of the frame (BD_LAST), run FCB off-loading/padding and hand the frame to
 * the network backend.  Finally clear the hardware-owned status bits in
 * @bd so the caller can write it back to guest memory.
 */
static void process_tx_bd(eTSEC         *etsec,
                          eTSEC_rxtx_bd *bd)
{
    uint8_t *tmp_buff = NULL;
    /* TBDBPH supplies bits 32-35 of the 36-bit Tx data buffer address */
    hwaddr tbdbth     = (hwaddr)(etsec->regs[TBDBPH].value & 0xF) << 32;

    if (bd->length == 0) {
        /* ERROR: empty BD, nothing to transmit */
        return;
    }

    if (etsec->tx_buffer_len == 0) {
        /* It's the first BD: remember it, since its flags (TOE/UN, PADCRC,
         * TC) control how the whole frame is processed at BD_LAST time */
        etsec->first_bd = *bd;
    }

    /* TODO: if TxBD[TOE/UN] skip the Tx Frame Control Block*/

    /* Load this Data Buffer */
    etsec->tx_buffer = g_realloc(etsec->tx_buffer,
                                    etsec->tx_buffer_len + bd->length);
    tmp_buff = etsec->tx_buffer + etsec->tx_buffer_len;
    cpu_physical_memory_read(bd->bufptr + tbdbth, tmp_buff, bd->length);

    /* Update buffer length */
    etsec->tx_buffer_len += bd->length;


    if (etsec->tx_buffer_len != 0 && (bd->flags & BD_LAST)) {
        if (etsec->regs[MACCFG1].value & MACCFG1_TX_EN) {
            /* MAC Transmit enabled */

            /* Process offload Tx FCB prepended to the frame */
            if (etsec->first_bd.flags & BD_TX_TOEUN) {
                process_tx_fcb(etsec);
            }

            if (etsec->first_bd.flags & BD_TX_PADCRC
                || etsec->regs[MACCFG2].value & MACCFG2_PADCRC) {

                /* Padding and CRC (Padding implies CRC) */
                tx_padding_and_crc(etsec, 64);

            } else if (etsec->first_bd.flags & BD_TX_TC
                       || etsec->regs[MACCFG2].value & MACCFG2_CRC_EN) {

                /* Only CRC */
                /* Never add CRC in QEMU */
            }

#if defined(HEX_DUMP)
            qemu_log("eTSEC Send packet size:%d\n", etsec->tx_buffer_len);
            qemu_hexdump(etsec->tx_buffer, stderr, "", etsec->tx_buffer_len);
#endif  /* HEX_DUMP */

            /* Skip the 8-byte FCB when the frame carried one */
            if (etsec->first_bd.flags & BD_TX_TOEUN) {
                qemu_send_packet(qemu_get_queue(etsec->nic),
                        etsec->tx_buffer + 8,
                        etsec->tx_buffer_len - 8);
            } else {
                qemu_send_packet(qemu_get_queue(etsec->nic),
                        etsec->tx_buffer,
                        etsec->tx_buffer_len);
            }

        }

        /* Frame consumed: restart accumulation at the next BD */
        etsec->tx_buffer_len = 0;

        if (bd->flags & BD_INTERRUPT) {
            ievent_set(etsec, IEVENT_TXF);
        }
    } else {
        if (bd->flags & BD_INTERRUPT) {
            ievent_set(etsec, IEVENT_TXB);
        }
    }

    /* Update DB flags */

    /* Clear Ready */
    bd->flags &= ~BD_TX_READY;

    /* Clear Defer */
    bd->flags &= ~BD_TX_PREDEF;

    /* Clear Late Collision */
    bd->flags &= ~BD_TX_HFELC;

    /* Clear Retransmission Limit */
    bd->flags &= ~BD_TX_CFRL;

    /* Clear Retry Count */
    bd->flags &= ~(BD_TX_RC_MASK << BD_TX_RC_OFFSET);

    /* Clear Underrun */
    bd->flags &= ~BD_TX_TOEUN;

    /* Clear Truncation */
    bd->flags &= ~BD_TX_TR;
}
330 
/* Walk Tx ring @ring_nbr, transmitting every BD marked ready.
 *
 * The walk starts at the current TBPTRx position and stops when it reaches
 * the ring base (i.e. after the wrap), so a full pass over the ring is made
 * and TBPTRx always ends up reset to the ring base.  Each processed BD is
 * written back with its status bits updated; finally THLTx is set to signal
 * that transmission has halted.
 */
void etsec_walk_tx_ring(eTSEC *etsec, int ring_nbr)
{
    hwaddr        ring_base = 0;
    hwaddr        bd_addr   = 0;
    eTSEC_rxtx_bd bd;
    uint16_t      bd_flags;

    if (!(etsec->regs[MACCFG1].value & MACCFG1_TX_EN)) {
        RING_DEBUG("%s: MAC Transmit not enabled\n", __func__);
        return;
    }

    /* TBASEH holds bits 32-35 of the 36-bit ring base address; the low
     * 3 bits of TBASEx/TBPTRx are reserved (BDs are 8-byte aligned) */
    ring_base = (hwaddr)(etsec->regs[TBASEH].value & 0xF) << 32;
    ring_base += etsec->regs[TBASE0 + ring_nbr].value & ~0x7;
    bd_addr    = etsec->regs[TBPTR0 + ring_nbr].value & ~0x7;

    do {
        read_buffer_descriptor(etsec, bd_addr, &bd);

#ifdef DEBUG_BD
        print_bd(bd,
                 eTSEC_TRANSMIT,
                 (bd_addr - ring_base) / sizeof(eTSEC_rxtx_bd));

#endif  /* DEBUG_BD */

        /* Save flags before BD update: process_tx_bd clears status bits and
         * we still need BD_WRAP below to advance correctly */
        bd_flags = bd.flags;

        if (bd_flags & BD_TX_READY) {
            process_tx_bd(etsec, &bd);

            /* Write back BD after update */
            write_buffer_descriptor(etsec, bd_addr, &bd);
        }

        /* Wrap or next BD */
        if (bd_flags & BD_WRAP) {
            bd_addr = ring_base;
        } else {
            bd_addr += sizeof(eTSEC_rxtx_bd);
        }

    } while (bd_addr != ring_base);

    /* The loop above always terminates at the ring base */
    bd_addr = ring_base;

    /* Save the Buffer Descriptor Pointers to current bd */
    etsec->regs[TBPTR0 + ring_nbr].value = bd_addr;

    /* Set transmit halt THLTx */
    etsec->regs[TSTAT].value |= 1 << (31 - ring_nbr);
}
384 
/* Fill one empty Rx buffer descriptor with as much of the incoming frame as
 * fits in MRBLR bytes: first the pending FCB (if any), then packet data from
 * *buf, then the trailing CRC padding bytes.  @buf and @size are advanced /
 * decremented by the amount consumed; bd->length and bd->flags are updated
 * but the BD is NOT written back to guest memory (the caller does that).
 *
 * NOTE(review): the high address bits come from TBDBPH, the *Tx* data buffer
 * pointer high register — presumably RBDBPH was intended here; verify against
 * the eTSEC register map (harmless while guests leave both at 0).
 */
static void fill_rx_bd(eTSEC          *etsec,
                       eTSEC_rxtx_bd  *bd,
                       const uint8_t **buf,
                       size_t         *size)
{
    uint16_t to_write;
    hwaddr   bufptr = bd->bufptr +
        ((hwaddr)(etsec->regs[TBDBPH].value & 0xF) << 32);
    /* VLA scratch for the zero padding; NOTE(review): a zero-length VLA is
     * undefined behavior if rx_padding can ever be 0 on entry — confirm */
    uint8_t  padd[etsec->rx_padding];
    uint8_t  rem;

    RING_DEBUG("eTSEC fill Rx buffer @ 0x%016" HWADDR_PRIx
               " size:%zu(padding + crc:%u) + fcb:%u\n",
               bufptr, *size, etsec->rx_padding, etsec->rx_fcb_size);

    bd->length = 0;

    /* This operation will only write FCB (once per frame; the size is
     * cleared after the write so subsequent BDs get data only) */
    if (etsec->rx_fcb_size != 0) {

        cpu_physical_memory_write(bufptr, etsec->rx_fcb, etsec->rx_fcb_size);

        bufptr             += etsec->rx_fcb_size;
        bd->length         += etsec->rx_fcb_size;
        etsec->rx_fcb_size  = 0;

    }

    /* We remove padding from the computation of to_write because it is not
     * allocated in the buffer.
     */
    to_write = MIN(*size - etsec->rx_padding,
                   etsec->regs[MRBLR].value - etsec->rx_fcb_size);

    /* This operation can only write packet data and no padding */
    if (to_write > 0) {
        cpu_physical_memory_write(bufptr, *buf, to_write);

        *buf   += to_write;
        bufptr += to_write;
        *size  -= to_write;

        bd->flags  &= ~BD_RX_EMPTY;
        bd->length += to_write;
    }

    if (*size == etsec->rx_padding) {
        /* The remaining bytes are only for padding which is not actually
         * allocated in the data buffer.
         */

        rem = MIN(etsec->regs[MRBLR].value - bd->length, etsec->rx_padding);

        if (rem > 0) {
            memset(padd, 0x0, sizeof(padd));
            etsec->rx_padding -= rem;
            *size             -= rem;
            bd->length        += rem;
            cpu_physical_memory_write(bufptr, padd, rem);
        }
    }
}
447 
448 static void rx_init_frame(eTSEC *etsec, const uint8_t *buf, size_t size)
449 {
450     uint32_t fcb_size = 0;
451     uint8_t  prsdep   = (etsec->regs[RCTRL].value >> RCTRL_PRSDEP_OFFSET)
452         & RCTRL_PRSDEP_MASK;
453 
454     if (prsdep != 0) {
455         /* Prepend FCB (FCB size + RCTRL[PAL]) */
456         fcb_size = 8 + ((etsec->regs[RCTRL].value >> 16) & 0x1F);
457 
458         etsec->rx_fcb_size = fcb_size;
459 
460         /* TODO: fill_FCB(etsec); */
461         memset(etsec->rx_fcb, 0x0, sizeof(etsec->rx_fcb));
462 
463     } else {
464         etsec->rx_fcb_size = 0;
465     }
466 
467     g_free(etsec->rx_buffer);
468 
469     /* Do not copy the frame for now */
470     etsec->rx_buffer     = (uint8_t *)buf;
471     etsec->rx_buffer_len = size;
472 
473     /* CRC padding (We don't have to compute the CRC) */
474     etsec->rx_padding = 4;
475 
476     etsec->rx_first_in_frame = 1;
477     etsec->rx_remaining_data = etsec->rx_buffer_len;
478     RING_DEBUG("%s: rx_buffer_len:%u rx_padding+crc:%u\n", __func__,
479                etsec->rx_buffer_len, etsec->rx_padding);
480 }
481 
482 ssize_t etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size)
483 {
484     int ring_nbr = 0;           /* Always use ring0 (no filer) */
485 
486     if (etsec->rx_buffer_len != 0) {
487         RING_DEBUG("%s: We can't receive now,"
488                    " a buffer is already in the pipe\n", __func__);
489         return 0;
490     }
491 
492     if (etsec->regs[RSTAT].value & 1 << (23 - ring_nbr)) {
493         RING_DEBUG("%s: The ring is halted\n", __func__);
494         return -1;
495     }
496 
497     if (etsec->regs[DMACTRL].value & DMACTRL_GRS) {
498         RING_DEBUG("%s: Graceful receive stop\n", __func__);
499         return -1;
500     }
501 
502     if (!(etsec->regs[MACCFG1].value & MACCFG1_RX_EN)) {
503         RING_DEBUG("%s: MAC Receive not enabled\n", __func__);
504         return -1;
505     }
506 
507     if ((etsec->regs[RCTRL].value & RCTRL_RSF) && (size < 60)) {
508         /* CRC is not in the packet yet, so short frame is below 60 bytes */
509         RING_DEBUG("%s: Drop short frame\n", __func__);
510         return -1;
511     }
512 
513     rx_init_frame(etsec, buf, size);
514 
515     etsec_walk_rx_ring(etsec, ring_nbr);
516 
517     return size;
518 }
519 
/* Walk Rx ring @ring_nbr, scattering the pending frame (etsec->rx_buffer)
 * into consecutive empty BDs until the frame (including CRC padding) is
 * fully delivered, a non-empty BD is hit, or the whole ring has been
 * traversed.  If data remains, the ring is halted (RSTAT[QHLTx]) and the
 * frame is copied so it survives until the ring is restarted.
 */
void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr)
{
    hwaddr         ring_base     = 0;
    hwaddr         bd_addr       = 0;
    hwaddr         start_bd_addr = 0;
    eTSEC_rxtx_bd  bd;
    uint16_t       bd_flags;
    size_t         remaining_data;
    const uint8_t *buf;
    uint8_t       *tmp_buf;
    size_t         size;

    if (etsec->rx_buffer_len == 0) {
        /* No frame to send */
        RING_DEBUG("No frame to send\n");
        return;
    }

    /* Resume where a previous (halted) walk left off: data still to deliver
     * plus the CRC padding, starting at the already-consumed offset */
    remaining_data = etsec->rx_remaining_data + etsec->rx_padding;
    buf            = etsec->rx_buffer
        + (etsec->rx_buffer_len - etsec->rx_remaining_data);
    /* Total frame size as the guest sees it (payload + 4 CRC bytes) */
    size           = etsec->rx_buffer_len + etsec->rx_padding;

    /* RBASEH holds bits 32-35 of the 36-bit ring base; low 3 bits of
     * RBASEx/RBPTRx are reserved (BDs are 8-byte aligned) */
    ring_base = (hwaddr)(etsec->regs[RBASEH].value & 0xF) << 32;
    ring_base += etsec->regs[RBASE0 + ring_nbr].value & ~0x7;
    start_bd_addr  = bd_addr = etsec->regs[RBPTR0 + ring_nbr].value & ~0x7;

    do {
        read_buffer_descriptor(etsec, bd_addr, &bd);

#ifdef DEBUG_BD
        print_bd(bd,
                 eTSEC_RECEIVE,
                 (bd_addr - ring_base) / sizeof(eTSEC_rxtx_bd));

#endif  /* DEBUG_BD */

        /* Save flags before BD update: fill_rx_bd clears BD_RX_EMPTY and we
         * still need the original BD_WRAP/BD_RX_EMPTY below */
        bd_flags = bd.flags;

        if (bd_flags & BD_RX_EMPTY) {
            fill_rx_bd(etsec, &bd, &buf, &remaining_data);

            if (etsec->rx_first_in_frame) {
                bd.flags |= BD_RX_FIRST;
                etsec->rx_first_in_frame = 0;
                etsec->rx_first_bd = bd;
            }

            /* Last in frame */
            if (remaining_data == 0) {

                /* Clear flags (the low 11 bits are per-frame status) */

                bd.flags &= ~0x7ff;

                bd.flags |= BD_LAST;

                /* NOTE: non-octet aligned frame is impossible in qemu */

                if (size >= etsec->regs[MAXFRM].value) {
                    /* frame length violation */
                    qemu_log("%s frame length violation: size:%zu MAXFRM:%d\n",
                           __func__, size, etsec->regs[MAXFRM].value);

                    bd.flags |= BD_RX_LG;
                }

                if (size  < 64) {
                    /* Short frame */
                    bd.flags |= BD_RX_SH;
                }

                /* TODO: Broadcast and Multicast */

                if (bd.flags & BD_INTERRUPT) {
                    /* Set RXFx */
                    etsec->regs[RSTAT].value |= 1 << (7 - ring_nbr);

                    /* Set IEVENT */
                    ievent_set(etsec, IEVENT_RXF);
                }

            } else {
                if (bd.flags & BD_INTERRUPT) {
                    /* Set IEVENT */
                    ievent_set(etsec, IEVENT_RXB);
                }
            }

            /* Write back BD after update */
            write_buffer_descriptor(etsec, bd_addr, &bd);
        }

        /* Wrap or next BD */
        if (bd_flags & BD_WRAP) {
            bd_addr = ring_base;
        } else {
            bd_addr += sizeof(eTSEC_rxtx_bd);
        }
    } while (remaining_data != 0
             && (bd_flags & BD_RX_EMPTY)
             && bd_addr != start_bd_addr);

    /* Reset ring ptr */
    etsec->regs[RBPTR0 + ring_nbr].value = bd_addr;

    /* The frame is too large to fit in the Rx ring */
    if (remaining_data > 0) {

        /* Set RSTAT[QHLTx] */
        etsec->regs[RSTAT].value |= 1 << (23 - ring_nbr);

        /* Save remaining data to send the end of the frame when the ring will
         * be restarted
         */
        etsec->rx_remaining_data = remaining_data;

        /* Copy the frame
         * NOTE(review): size includes rx_padding (up to 4 bytes) that were
         * never allocated in rx_buffer, so this memcpy appears to read past
         * the end of the source buffer — confirm. Also, if rx_buffer already
         * holds a copy from a previous halt, that buffer looks leaked here. */
        tmp_buf = g_malloc(size);
        memcpy(tmp_buf, etsec->rx_buffer, size);
        etsec->rx_buffer = tmp_buf;

        RING_DEBUG("no empty RxBD available any more\n");
    } else {
        /* Frame fully delivered; rx_buffer either pointed at the caller's
         * packet or at our own copy — ownership ends here */
        etsec->rx_buffer_len = 0;
        etsec->rx_buffer     = NULL;
        if (etsec->need_flush) {
            qemu_flush_queued_packets(qemu_get_queue(etsec->nic));
        }
    }

    RING_DEBUG("eTSEC End of ring_write: remaining_data:%zu\n", remaining_data);
}
654