/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include "common.h"
#include "descs_com.h"

static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.etx.error_summary)) {
		CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
		if (unlikely(p->des01.etx.jabber_timeout)) {
			CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
			x->tx_jabber++;
		}

		if (unlikely(p->des01.etx.frame_flushed)) {
			CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(p->des01.etx.loss_carrier)) {
			CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.no_carrier)) {
			CHIP_DBG(KERN_ERR "\tno_carrier error\n");
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.late_collision)) {
			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_collisions)) {
			CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_deferral)) {
			CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
			x->tx_deferred++;
		}

		if (unlikely(p->des01.etx.underflow_error)) {
			CHIP_DBG(KERN_ERR "\tunderflow error\n");
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(p->des01.etx.ip_header_error)) {
			CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
			x->tx_ip_header_error++;
		}

		if (unlikely(p->des01.etx.payload_error)) {
			CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = -1;
	}

	if (unlikely(p->des01.etx.deferred)) {
		CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
		x->tx_deferred++;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.etx.vlan_frame) {
		CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
		x->tx_vlan++;
	}
#endif

	return ret;
}

static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return p->des01.etx.buffer1_size;
}
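/*
 * Usage sketch (illustrative only, not code from this file): the stmmac
 * core is expected to call tx_status on each completed descriptor from its
 * TX clean loop and treat a negative return as a transmit error.  The
 * caller below is hypothetical; "priv" and its fields are assumptions
 * about how the core dereferences stmmac_desc_ops:
 *
 *	if (priv->hw->desc->tx_status(&dev->stats, &priv->xstats,
 *				      p, priv->ioaddr) < 0)
 *		dev->stats.tx_errors++;
 */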
static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 *      1 0 0 | IPv4/6 No CSUM errors.
	 *      1 0 1 | IPv4/6 CSUM PAYLOAD error
	 *      1 1 0 | IPv4/6 CSUM IP HR error
	 *      1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errors
	 *      0 0 1 | IPv4/6 unsupported IP PAYLOAD
	 *      0 1 1 | COE bypassed.. no IPv4/6 frame
	 *      0 1 0 | Reserved.
	 */
	if (status == 0x0) {
		CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
		ret = llc_snap;
	} else if (status == 0x4) {
		CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errors.\n");
		ret = good_frame;
	} else if (status == 0x5) {
		CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x6) {
		CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
		ret = csum_none;
	} else if (status == 0x7) {
		CHIP_DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 Header and Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x1) {
		CHIP_DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 unsupported IP PAYLOAD.\n");
		ret = discard_frame;
	} else if (status == 0x3) {
		CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4/6 frame.\n");
		ret = discard_frame;
	}
	return ret;
}
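/*
 * Worked example (derived from the table above, not called anywhere): a
 * frame received with frame_type = 1, ipc_csum_error = 1 and
 * payload_csum_error = 0 composes status = (1 << 2 | 1 << 1 | 0) & 0x7
 * = 0x6, i.e. an IPv4/6 header checksum error; the function returns
 * csum_none so the stack is left to verify the checksum in software.
 */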
static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	int ret = good_frame;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.erx.error_summary)) {
		CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
			 p->des01.erx);
		if (unlikely(p->des01.erx.descriptor_error)) {
			CHIP_DBG(KERN_ERR "\tdescriptor error\n");
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(p->des01.erx.overflow_error)) {
			CHIP_DBG(KERN_ERR "\toverflow error\n");
			x->rx_gmac_overflow++;
		}

		if (unlikely(p->des01.erx.ipc_csum_error))
			CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

		if (unlikely(p->des01.erx.late_collision)) {
			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions++;
		}
		if (unlikely(p->des01.erx.receive_watchdog)) {
			CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
			x->rx_watchdog++;
		}
		if (unlikely(p->des01.erx.error_gmii)) {
			CHIP_DBG(KERN_ERR "\tReceive Error\n");
			x->rx_mii++;
		}
		if (unlikely(p->des01.erx.crc_error)) {
			CHIP_DBG(KERN_ERR "\tCRC error\n");
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * This does not match the information reported in the databook.
	 * At any rate, we need to understand whether the hardware CSUM
	 * computation succeeded and report this to the upper layers. */
	ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
		p->des01.erx.frame_type, p->des01.erx.payload_csum_error);

	if (unlikely(p->des01.erx.dribbling)) {
		CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
		x->dribbling_bit++;
	}
	if (unlikely(p->des01.erx.sa_filter_fail)) {
		CHIP_DBG(KERN_ERR "GMAC RX: Source Address filter fail\n");
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.da_filter_fail)) {
		CHIP_DBG(KERN_ERR "GMAC RX: Dest Address filter fail\n");
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.length_error)) {
		CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.erx.vlan_tag) {
		CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
		x->rx_vlan++;
	}
#endif
	return ret;
}

static void enh_desc_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
				  int disable_rx_ic)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.erx.own = 1;
		p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;

		ehn_desc_rx_set_on_ring_chain(p, (i == ring_size - 1));

		if (disable_rx_ic)
			p->des01.erx.disable_ic = 1;
		p++;
	}
}

static void enh_desc_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
	int i;

	for (i = 0; i < ring_size; i++) {
		p->des01.etx.own = 0;
		ehn_desc_tx_set_on_ring_chain(p, (i == ring_size - 1));
		p++;
	}
}

static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}

static int enh_desc_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}

static void enh_desc_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}

static void enh_desc_release_tx_desc(struct dma_desc *p)
{
	int ter = p->des01.etx.end_ring;

	memset(p, 0, offsetof(struct dma_desc, des2));
	enh_desc_end_tx_desc(p, ter);
}

static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     int csum_flag)
{
	p->des01.etx.first_segment = is_fs;

	enh_set_tx_desc_len(p, len);

	if (likely(csum_flag))
		p->des01.etx.checksum_insertion = cic_full;
}

static void enh_desc_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}

static void enh_desc_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}

static int enh_desc_get_rx_frame_len(struct dma_desc *p)
{
	return p->des01.erx.frame_length;
}
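/*
 * TX descriptor lifecycle sketch (the calling order below is an assumption
 * about how the core drives these hooks, not code from this file): the
 * driver fills a descriptor, closes it, and only then hands it to the DMA
 * by flipping the own bit:
 *
 *	enh_desc_prepare_tx_desc(p, 1, skb->len, csum_insertion);
 *	enh_desc_close_tx_desc(p);	// set last_segment + interrupt
 *	wmb();				// descriptor written before own bit
 *	enh_desc_set_tx_owner(p);	// DMA now owns the descriptor
 */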
const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.get_rx_owner = enh_desc_get_rx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.clear_tx_ic = enh_desc_clear_tx_ic,
	.close_tx_desc = enh_desc_close_tx_desc,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
};
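/*
 * Binding sketch (an assumption about the surrounding driver, shown for
 * context): the GMAC setup code is expected to publish this table so the
 * core dispatches descriptor handling through it, along the lines of:
 *
 *	mac->desc = &enh_desc_ops;	// hypothetical dwmac1000 setup
 *
 * after which every hot-path descriptor operation goes through the
 * stmmac_desc_ops indirect calls rather than the functions above directly.
 */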