/*
 * QEMU Freescale eTSEC Emulator
 *
 * Copyright (c) 2011-2013 AdaCore
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "net/checksum.h"
#include "qemu/log.h"
#include "etsec.h"
#include "registers.h"

/* #define ETSEC_RING_DEBUG */
/* #define HEX_DUMP */
/* #define DEBUG_BD */

#ifdef ETSEC_RING_DEBUG
static const int debug_etsec = 1;
#else
static const int debug_etsec;
#endif

#define RING_DEBUG(fmt, ...) do {             \
    if (debug_etsec) {                        \
        qemu_log(fmt, ## __VA_ARGS__);        \
    }                                         \
    } while (0)

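/*
 * Usage sketch: uncommenting "#define ETSEC_RING_DEBUG" above turns every
 * RING_DEBUG() below into a qemu_log() call, e.g.
 *
 *     RING_DEBUG("%s: MAC Transmit not enabled\n", __func__);
 *
 * With the define left out, debug_etsec is 0 and the compiler can discard
 * the branch, while the format arguments still get type-checked at no
 * runtime cost.
 */
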
#ifdef DEBUG_BD

static void print_tx_bd_flags(uint16_t flags)
{
    qemu_log("      Ready: %d\n", !!(flags & BD_TX_READY));
    qemu_log("      PAD/CRC: %d\n", !!(flags & BD_TX_PADCRC));
    qemu_log("      Wrap: %d\n", !!(flags & BD_WRAP));
    qemu_log("      Interrupt: %d\n", !!(flags & BD_INTERRUPT));
    qemu_log("      Last in frame: %d\n", !!(flags & BD_LAST));
    qemu_log("      Tx CRC: %d\n", !!(flags & BD_TX_TC));
    qemu_log("      User-defined preamble / defer: %d\n",
             !!(flags & BD_TX_PREDEF));
    qemu_log("      Huge frame enable / Late collision: %d\n",
             !!(flags & BD_TX_HFELC));
    qemu_log("      Control frame / Retransmission Limit: %d\n",
             !!(flags & BD_TX_CFRL));
    qemu_log("      Retry count: %d\n",
             (flags >> BD_TX_RC_OFFSET) & BD_TX_RC_MASK);
    qemu_log("      Underrun / TCP/IP off-load enable: %d\n",
             !!(flags & BD_TX_TOEUN));
    qemu_log("      Truncation: %d\n", !!(flags & BD_TX_TR));
}

static void print_rx_bd_flags(uint16_t flags)
{
    qemu_log("      Empty: %d\n", !!(flags & BD_RX_EMPTY));
    qemu_log("      Receive software ownership: %d\n", !!(flags & BD_RX_RO1));
    qemu_log("      Wrap: %d\n", !!(flags & BD_WRAP));
    qemu_log("      Interrupt: %d\n", !!(flags & BD_INTERRUPT));
    qemu_log("      Last in frame: %d\n", !!(flags & BD_LAST));
    qemu_log("      First in frame: %d\n", !!(flags & BD_RX_FIRST));
    qemu_log("      Miss: %d\n", !!(flags & BD_RX_MISS));
    qemu_log("      Broadcast: %d\n", !!(flags & BD_RX_BROADCAST));
    qemu_log("      Multicast: %d\n", !!(flags & BD_RX_MULTICAST));
    qemu_log("      Rx frame length violation: %d\n", !!(flags & BD_RX_LG));
    qemu_log("      Rx non-octet aligned frame: %d\n", !!(flags & BD_RX_NO));
    qemu_log("      Short frame: %d\n", !!(flags & BD_RX_SH));
    qemu_log("      Rx CRC Error: %d\n", !!(flags & BD_RX_CR));
    qemu_log("      Overrun: %d\n", !!(flags & BD_RX_OV));
    qemu_log("      Truncation: %d\n", !!(flags & BD_RX_TR));
}

static void print_bd(eTSEC_rxtx_bd bd, int mode, uint32_t index)
{
    qemu_log("eTSEC %s Data Buffer Descriptor (%u)\n",
             mode == eTSEC_TRANSMIT ? "Transmit" : "Receive",
             index);
    qemu_log("   Flags   : 0x%04x\n", bd.flags);
    if (mode == eTSEC_TRANSMIT) {
        print_tx_bd_flags(bd.flags);
    } else {
        print_rx_bd_flags(bd.flags);
    }
    qemu_log("   Length  : 0x%04x\n", bd.length);
    qemu_log("   Pointer : 0x%08x\n", bd.bufptr);
}

#endif  /* DEBUG_BD */

static void read_buffer_descriptor(eTSEC         *etsec,
                                   hwaddr         addr,
                                   eTSEC_rxtx_bd *bd)
{
    assert(bd != NULL);

    RING_DEBUG("READ Buffer Descriptor @ 0x" TARGET_FMT_plx "\n", addr);
    cpu_physical_memory_read(addr,
                             bd,
                             sizeof(eTSEC_rxtx_bd));

    if (etsec->regs[DMACTRL].value & DMACTRL_LE) {
        bd->flags  = lduw_le_p(&bd->flags);
        bd->length = lduw_le_p(&bd->length);
        bd->bufptr = ldl_le_p(&bd->bufptr);
    } else {
        bd->flags  = lduw_be_p(&bd->flags);
        bd->length = lduw_be_p(&bd->length);
        bd->bufptr = ldl_be_p(&bd->bufptr);
    }
}

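/*
 * Layout sketch of a descriptor as the loads above imply it; the actual
 * definition lives in etsec.h, so treat this as an assumption for reading
 * this file:
 *
 *     typedef struct eTSEC_rxtx_bd {
 *         uint16_t flags;     (BD_* status and control bits)
 *         uint16_t length;    (data buffer length in bytes)
 *         uint32_t bufptr;    (low 32 bits of the data buffer address)
 *     } eTSEC_rxtx_bd;
 *
 * DMACTRL[LE] selects the guest-memory endianness of descriptors; the
 * ld*_p helpers convert the fields to host order in place after the raw
 * 8-byte read, and the st*_p helpers below do the reverse before write-back.
 */
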
static void write_buffer_descriptor(eTSEC         *etsec,
                                    hwaddr         addr,
                                    eTSEC_rxtx_bd *bd)
{
    assert(bd != NULL);

    if (etsec->regs[DMACTRL].value & DMACTRL_LE) {
        stw_le_p(&bd->flags, bd->flags);
        stw_le_p(&bd->length, bd->length);
        stl_le_p(&bd->bufptr, bd->bufptr);
    } else {
        stw_be_p(&bd->flags, bd->flags);
        stw_be_p(&bd->length, bd->length);
        stl_be_p(&bd->bufptr, bd->bufptr);
    }

    RING_DEBUG("Write Buffer Descriptor @ 0x" TARGET_FMT_plx "\n", addr);
    cpu_physical_memory_write(addr,
                              bd,
                              sizeof(eTSEC_rxtx_bd));
}

static void ievent_set(eTSEC    *etsec,
                       uint32_t  flags)
{
    etsec->regs[IEVENT].value |= flags;

    etsec_update_irq(etsec);
}

static void tx_padding_and_crc(eTSEC *etsec, uint32_t min_frame_len)
{
    int add = min_frame_len - etsec->tx_buffer_len;

    /* Padding */
    if (add > 0) {
        RING_DEBUG("pad:%d\n", add);
        etsec->tx_buffer = g_realloc(etsec->tx_buffer,
                                     etsec->tx_buffer_len + add);

        memset(etsec->tx_buffer + etsec->tx_buffer_len, 0x0, add);
        etsec->tx_buffer_len += add;
    }

    /* Never add CRC in QEMU */
}

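/*
 * Worked example: with min_frame_len = 64 (the only value used below), a
 * 58-byte frame gains add = 64 - 58 = 6 zero bytes. Real hardware would
 * pad the payload to 60 bytes and append a 4-byte FCS; since QEMU never
 * computes the CRC, the zero padding simply stands in for it.
 */
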
static void process_tx_fcb(eTSEC *etsec)
{
    uint8_t flags = (uint8_t)(*etsec->tx_buffer);
    /* L3 header offset from start of frame */
    uint8_t l3_header_offset = (uint8_t)*(etsec->tx_buffer + 3);
    /* L4 header offset from start of L3 header */
    uint8_t l4_header_offset = (uint8_t)*(etsec->tx_buffer + 2);
    /* L3 header */
    uint8_t *l3_header = etsec->tx_buffer + 8 + l3_header_offset;
    /* L4 header */
    uint8_t *l4_header = l3_header + l4_header_offset;
    int csum = 0;

    /* if packet is IP4 and IP checksum is requested */
    if (flags & FCB_TX_IP && flags & FCB_TX_CIP) {
        csum |= CSUM_IP;
    }
    /* TODO: check the correct usage of the PHCS field of the FCB in case
     * the NPH flag is on */

    /* if packet is IP4 and TCP or UDP */
    if (flags & FCB_TX_IP && flags & FCB_TX_TUP) {
        /* if UDP */
        if (flags & FCB_TX_UDP) {
            /* if checksum is requested */
            if (flags & FCB_TX_CTU) {
                /* do UDP checksum */
                csum |= CSUM_UDP;
            } else {
                /* set checksum field to 0 */
                l4_header[6] = 0;
                l4_header[7] = 0;
            }
        } else if (flags & FCB_TX_CTU) { /* if TCP and checksum is requested */
            /* do TCP checksum */
            csum |= CSUM_TCP;
        }
    }

    if (csum) {
        net_checksum_calculate(etsec->tx_buffer + 8,
                               etsec->tx_buffer_len - 8, csum);
    }
}

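/*
 * Tx Frame Control Block layout, as far as the accesses above go (bytes
 * not read here are omitted, so this sketch is partial):
 *
 *     byte 0      flags (FCB_TX_IP, FCB_TX_TUP, FCB_TX_UDP, FCB_TX_CIP,
 *                 FCB_TX_CTU, ...)
 *     byte 2      l4os: L4 header offset, counted from the L3 header
 *     byte 3      l3os: L3 header offset, counted from the frame start
 *     bytes 0-7   the 8-byte FCB prepended to the Ethernet frame, which
 *                 is why checksumming and transmission skip "+ 8"
 *
 * l4_header[6..7] is the checksum field of the UDP header (offset 6),
 * zeroed when no UDP checksum is requested.
 */
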
static void process_tx_bd(eTSEC         *etsec,
                          eTSEC_rxtx_bd *bd)
{
    uint8_t *tmp_buff = NULL;
    hwaddr tbdbth     = (hwaddr)(etsec->regs[TBDBPH].value & 0xF) << 32;

    if (bd->length == 0) {
        /* ERROR */
        return;
    }

    if (etsec->tx_buffer_len == 0) {
        /* It's the first BD */
        etsec->first_bd = *bd;
    }

    /* TODO: if TxBD[TOE/UN] skip the Tx Frame Control Block */

    /* Load this Data Buffer */
    etsec->tx_buffer = g_realloc(etsec->tx_buffer,
                                 etsec->tx_buffer_len + bd->length);
    tmp_buff = etsec->tx_buffer + etsec->tx_buffer_len;
    cpu_physical_memory_read(bd->bufptr + tbdbth, tmp_buff, bd->length);

    /* Update buffer length */
    etsec->tx_buffer_len += bd->length;

    if (etsec->tx_buffer_len != 0 && (bd->flags & BD_LAST)) {
        if (etsec->regs[MACCFG1].value & MACCFG1_TX_EN) {
            /* MAC Transmit enabled */

            /* Process offload Tx FCB */
            if (etsec->first_bd.flags & BD_TX_TOEUN) {
                process_tx_fcb(etsec);
            }

            if (etsec->first_bd.flags & BD_TX_PADCRC
                || etsec->regs[MACCFG2].value & MACCFG2_PADCRC) {

                /* Padding and CRC (Padding implies CRC) */
                tx_padding_and_crc(etsec, 64);

            } else if (etsec->first_bd.flags & BD_TX_TC
                       || etsec->regs[MACCFG2].value & MACCFG2_CRC_EN) {

                /* Only CRC */
                /* Never add CRC in QEMU */
            }

#if defined(HEX_DUMP)
            qemu_log("eTSEC Send packet size:%d\n", etsec->tx_buffer_len);
            qemu_hexdump(stderr, "", etsec->tx_buffer, etsec->tx_buffer_len);
#endif  /* HEX_DUMP */

            if (etsec->first_bd.flags & BD_TX_TOEUN) {
                qemu_send_packet(qemu_get_queue(etsec->nic),
                                 etsec->tx_buffer + 8,
                                 etsec->tx_buffer_len - 8);
            } else {
                qemu_send_packet(qemu_get_queue(etsec->nic),
                                 etsec->tx_buffer,
                                 etsec->tx_buffer_len);
            }
        }

        etsec->tx_buffer_len = 0;

        if (bd->flags & BD_INTERRUPT) {
            ievent_set(etsec, IEVENT_TXF);
        }
    } else {
        if (bd->flags & BD_INTERRUPT) {
            ievent_set(etsec, IEVENT_TXB);
        }
    }

    /* Update BD flags */

    /* Clear Ready */
    bd->flags &= ~BD_TX_READY;

    /* Clear Defer */
    bd->flags &= ~BD_TX_PREDEF;

    /* Clear Late Collision */
    bd->flags &= ~BD_TX_HFELC;

    /* Clear Retransmission Limit */
    bd->flags &= ~BD_TX_CFRL;

    /* Clear Retry Count */
    bd->flags &= ~(BD_TX_RC_MASK << BD_TX_RC_OFFSET);

    /* Clear Underrun */
    bd->flags &= ~BD_TX_TOEUN;

    /* Clear Truncation */
    bd->flags &= ~BD_TX_TR;
}

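/*
 * Example of how a multi-descriptor frame flows through process_tx_bd():
 * the guest splits one frame across two BDs, setting BD_TX_READY on both
 * and BD_LAST only on the second. The first call records the descriptor
 * in etsec->first_bd (tx_buffer_len is still 0) and appends its buffer;
 * the second call appends the rest and, seeing BD_LAST, applies the
 * frame-level flags (PAD/CRC, TOE) taken from first_bd before sending.
 */
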
void etsec_walk_tx_ring(eTSEC *etsec, int ring_nbr)
{
    hwaddr        ring_base = 0;
    hwaddr        bd_addr   = 0;
    eTSEC_rxtx_bd bd;
    uint16_t      bd_flags;

    if (!(etsec->regs[MACCFG1].value & MACCFG1_TX_EN)) {
        RING_DEBUG("%s: MAC Transmit not enabled\n", __func__);
        return;
    }

    ring_base  = (hwaddr)(etsec->regs[TBASEH].value & 0xF) << 32;
    ring_base += etsec->regs[TBASE0 + ring_nbr].value & ~0x7;
    bd_addr    = etsec->regs[TBPTR0 + ring_nbr].value & ~0x7;

    do {
        read_buffer_descriptor(etsec, bd_addr, &bd);

#ifdef DEBUG_BD
        print_bd(bd,
                 eTSEC_TRANSMIT,
                 (bd_addr - ring_base) / sizeof(eTSEC_rxtx_bd));
#endif  /* DEBUG_BD */

        /* Save flags before BD update */
        bd_flags = bd.flags;

        if (!(bd_flags & BD_TX_READY)) {
            break;
        }

        process_tx_bd(etsec, &bd);
        /* Write back BD after update */
        write_buffer_descriptor(etsec, bd_addr, &bd);

        /* Wrap or next BD */
        if (bd_flags & BD_WRAP) {
            bd_addr = ring_base;
        } else {
            bd_addr += sizeof(eTSEC_rxtx_bd);
        }
    } while (true);

    /* Save the buffer descriptor pointer to the last BD that was not
     * successfully closed */
    etsec->regs[TBPTR0 + ring_nbr].value = bd_addr;

    /* Set transmit halt THLTx */
    etsec->regs[TSTAT].value |= 1 << (31 - ring_nbr);
}

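/*
 * Bit-position example for ring 0: the halt bit set above is
 * 1 << (31 - 0), i.e. TSTAT bit 31 (THLT0). The walk only stops at a
 * descriptor whose BD_TX_READY bit is clear, and TBPTR0 is left pointing
 * at that BD so a later walk resumes from it.
 */
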
static void fill_rx_bd(eTSEC          *etsec,
                       eTSEC_rxtx_bd  *bd,
                       const uint8_t **buf,
                       size_t         *size)
{
    uint16_t to_write;
    hwaddr   bufptr = bd->bufptr +
        ((hwaddr)(etsec->regs[TBDBPH].value & 0xF) << 32);
    uint8_t  padd[etsec->rx_padding];
    uint8_t  rem;

    RING_DEBUG("eTSEC fill Rx buffer @ 0x%016" HWADDR_PRIx
               " size:%zu(padding + crc:%u) + fcb:%u\n",
               bufptr, *size, etsec->rx_padding, etsec->rx_fcb_size);

    bd->length = 0;

    /* This operation will only write the FCB */
    if (etsec->rx_fcb_size != 0) {
        cpu_physical_memory_write(bufptr, etsec->rx_fcb, etsec->rx_fcb_size);

        bufptr             += etsec->rx_fcb_size;
        bd->length         += etsec->rx_fcb_size;
        etsec->rx_fcb_size  = 0;
    }

    /* We remove padding from the computation of to_write because it is not
     * allocated in the buffer.
     */
    to_write = MIN(*size - etsec->rx_padding,
                   etsec->regs[MRBLR].value - etsec->rx_fcb_size);

    /* This operation can only write packet data, no padding */
    if (to_write > 0) {
        cpu_physical_memory_write(bufptr, *buf, to_write);

        *buf   += to_write;
        bufptr += to_write;
        *size  -= to_write;

        bd->flags  &= ~BD_RX_EMPTY;
        bd->length += to_write;
    }

    if (*size == etsec->rx_padding) {
        /* The remaining bytes are only for padding, which is not actually
         * allocated in the data buffer.
         */
        rem = MIN(etsec->regs[MRBLR].value - bd->length, etsec->rx_padding);

        if (rem > 0) {
            memset(padd, 0x0, sizeof(padd));
            etsec->rx_padding -= rem;
            *size             -= rem;
            bd->length        += rem;
            cpu_physical_memory_write(bufptr, padd, rem);
        }
    }
}

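/*
 * Worked example (no FCB, MRBLR = 256): a 300-byte frame arrives, so the
 * walk starts with *size = 300 + 4 bytes of virtual CRC padding. First
 * call: to_write = MIN(304 - 4, 256) = 256 data bytes, leaving *size = 48.
 * Second call: to_write = MIN(48 - 4, 256) = 44 data bytes, then the
 * remaining 4 bytes are satisfied from the zeroed padd[] array rather
 * than from rx_buffer, closing the frame with bd->length = 48.
 */
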
static void rx_init_frame(eTSEC *etsec, const uint8_t *buf, size_t size)
{
    uint32_t fcb_size = 0;
    uint8_t  prsdep   = (etsec->regs[RCTRL].value >> RCTRL_PRSDEP_OFFSET)
        & RCTRL_PRSDEP_MASK;

    if (prsdep != 0) {
        /* Prepend FCB (FCB size + RCTRL[PAL]) */
        fcb_size = 8 + ((etsec->regs[RCTRL].value >> 16) & 0x1F);

        etsec->rx_fcb_size = fcb_size;

        /* TODO: fill_FCB(etsec); */
        memset(etsec->rx_fcb, 0x0, sizeof(etsec->rx_fcb));
    } else {
        etsec->rx_fcb_size = 0;
    }

    g_free(etsec->rx_buffer);

    /* Do not copy the frame for now */
    etsec->rx_buffer     = (uint8_t *)buf;
    etsec->rx_buffer_len = size;

    /* CRC padding (we don't have to compute the CRC) */
    etsec->rx_padding = 4;

    /*
     * Ensure that payload length + CRC length reaches the 802.3 minimum
     * frame size of 64 bytes (60 bytes of payload + 4-byte FCS)
     */
    if (etsec->rx_buffer_len < 60) {
        etsec->rx_padding += 60 - etsec->rx_buffer_len;
    }

    etsec->rx_first_in_frame = 1;
    etsec->rx_remaining_data = etsec->rx_buffer_len;
    RING_DEBUG("%s: rx_buffer_len:%u rx_padding+crc:%u\n", __func__,
               etsec->rx_buffer_len, etsec->rx_padding);
}

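/*
 * Padding arithmetic example: a 20-byte frame (only reachable with
 * RCTRL[RSF] set, since etsec_rx_ring_write drops short frames otherwise)
 * ends up with rx_padding = 4 + (60 - 20) = 44, so the guest sees a
 * 64-byte frame: 20 data bytes plus 44 zero bytes standing in for the
 * pad and the FCS.
 */
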
ssize_t etsec_rx_ring_write(eTSEC *etsec, const uint8_t *buf, size_t size)
{
    int ring_nbr = 0;           /* Always use ring0 (no filer) */

    if (etsec->rx_buffer_len != 0) {
        RING_DEBUG("%s: We can't receive now,"
                   " a buffer is already in the pipe\n", __func__);
        return 0;
    }

    if (etsec->regs[RSTAT].value & (1 << (23 - ring_nbr))) {
        RING_DEBUG("%s: The ring is halted\n", __func__);
        return -1;
    }

    if (etsec->regs[DMACTRL].value & DMACTRL_GRS) {
        RING_DEBUG("%s: Graceful receive stop\n", __func__);
        return -1;
    }

    if (!(etsec->regs[MACCFG1].value & MACCFG1_RX_EN)) {
        RING_DEBUG("%s: MAC Receive not enabled\n", __func__);
        return -1;
    }

    if (!(etsec->regs[RCTRL].value & RCTRL_RSF) && (size < 60)) {
        /* CRC is not in the packet yet, so a short frame is below 60 bytes */
        RING_DEBUG("%s: Drop short frame\n", __func__);
        return -1;
    }

    rx_init_frame(etsec, buf, size);

    etsec_walk_rx_ring(etsec, ring_nbr);

    return size;
}

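/*
 * Return-value contract with the QEMU net layer, as relied on here:
 * returning 0 asks the core to queue the packet and retry it later,
 * which is why etsec_walk_rx_ring() calls qemu_flush_queued_packets()
 * once the in-flight buffer drains; -1 and size both consume the packet
 * (dropped and accepted, respectively).
 */
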
void etsec_walk_rx_ring(eTSEC *etsec, int ring_nbr)
{
    hwaddr         ring_base     = 0;
    hwaddr         bd_addr       = 0;
    hwaddr         start_bd_addr = 0;
    eTSEC_rxtx_bd  bd;
    uint16_t       bd_flags;
    size_t         remaining_data;
    const uint8_t *buf;
    uint8_t       *tmp_buf;
    size_t         size;

    if (etsec->rx_buffer_len == 0) {
        /* No frame pending delivery */
        RING_DEBUG("No frame to deliver\n");
        return;
    }

    remaining_data = etsec->rx_remaining_data + etsec->rx_padding;
    buf            = etsec->rx_buffer
        + (etsec->rx_buffer_len - etsec->rx_remaining_data);
    size           = etsec->rx_buffer_len + etsec->rx_padding;

    ring_base  = (hwaddr)(etsec->regs[RBASEH].value & 0xF) << 32;
    ring_base += etsec->regs[RBASE0 + ring_nbr].value & ~0x7;
    start_bd_addr = bd_addr = etsec->regs[RBPTR0 + ring_nbr].value & ~0x7;

    do {
        read_buffer_descriptor(etsec, bd_addr, &bd);

#ifdef DEBUG_BD
        print_bd(bd,
                 eTSEC_RECEIVE,
                 (bd_addr - ring_base) / sizeof(eTSEC_rxtx_bd));
#endif  /* DEBUG_BD */

        /* Save flags before BD update */
        bd_flags = bd.flags;

        if (bd_flags & BD_RX_EMPTY) {
            fill_rx_bd(etsec, &bd, &buf, &remaining_data);

            if (etsec->rx_first_in_frame) {
                bd.flags |= BD_RX_FIRST;
                etsec->rx_first_in_frame = 0;
                etsec->rx_first_bd = bd;
            }

            /* Last in frame */
            if (remaining_data == 0) {

                /* Clear flags */
                bd.flags &= ~0x7ff;

                bd.flags |= BD_LAST;

                /* NOTE: a non-octet aligned frame is impossible in QEMU */

                if (size >= etsec->regs[MAXFRM].value) {
                    /* Frame length violation */
                    qemu_log("%s frame length violation: size:%zu MAXFRM:%u\n",
                             __func__, size, etsec->regs[MAXFRM].value);

                    bd.flags |= BD_RX_LG;
                }

                if (size < 64) {
                    /* Short frame */
                    bd.flags |= BD_RX_SH;
                }

                /* TODO: Broadcast and Multicast */

                if (bd.flags & BD_INTERRUPT) {
                    /* Set RXFx */
                    etsec->regs[RSTAT].value |= 1 << (7 - ring_nbr);

                    /* Set IEVENT */
                    ievent_set(etsec, IEVENT_RXF);
                }
            } else {
                if (bd.flags & BD_INTERRUPT) {
                    /* Set IEVENT */
                    ievent_set(etsec, IEVENT_RXB);
                }
            }

            /* Write back BD after update */
            write_buffer_descriptor(etsec, bd_addr, &bd);
        }

        /* Wrap or next BD */
        if (bd_flags & BD_WRAP) {
            bd_addr = ring_base;
        } else {
            bd_addr += sizeof(eTSEC_rxtx_bd);
        }
    } while (remaining_data != 0
             && (bd_flags & BD_RX_EMPTY)
             && bd_addr != start_bd_addr);

    /* Reset ring ptr */
    etsec->regs[RBPTR0 + ring_nbr].value = bd_addr;

    /* The frame is too large to fit in the Rx ring */
    if (remaining_data > 0) {

        /* Set RSTAT[QHLTx] */
        etsec->regs[RSTAT].value |= 1 << (23 - ring_nbr);

        /* Save the remaining data so the end of the frame can be sent when
         * the ring is restarted
         */
        etsec->rx_remaining_data = remaining_data;

        /* Copy the frame */
        tmp_buf = g_malloc(size);
        memcpy(tmp_buf, etsec->rx_buffer, size);
        etsec->rx_buffer = tmp_buf;

        RING_DEBUG("no empty RxBD available any more\n");
    } else {
        etsec->rx_buffer_len = 0;
        etsec->rx_buffer     = NULL;
        if (etsec->need_flush) {
            qemu_flush_queued_packets(qemu_get_queue(etsec->nic));
        }
    }

    RING_DEBUG("eTSEC End of ring_write: remaining_data:%zu\n", remaining_data);
}
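
/*
 * Resume-path sketch: once the guest refills descriptors and clears
 * RSTAT[QHLT0], a subsequent etsec_walk_rx_ring() picks up at the saved
 * RBPTR0 with buf offset by (rx_buffer_len - rx_remaining_data), which is
 * why the frame had to be copied out of the caller's buffer above. The
 * RSTAT bit positions for ring 0 are 1 << 23 (QHLT0) and 1 << 7 (RXF0),
 * matching the shifts used throughout this file.
 */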
653