/*******************************************************************************
  This contains the functions to handle the enhanced descriptors.

  Copyright (C) 2007-2009  STMicroelectronics Ltd

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/

#include <linux/stmmac.h>
#include "common.h"
#include "descs_com.h"

static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p, void __iomem *ioaddr)
{
	int ret = 0;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.etx.error_summary)) {
		CHIP_DBG(KERN_ERR "GMAC TX error... 0x%08x\n", p->des01.etx);
		if (unlikely(p->des01.etx.jabber_timeout)) {
			CHIP_DBG(KERN_ERR "\tjabber_timeout error\n");
			x->tx_jabber++;
		}

		if (unlikely(p->des01.etx.frame_flushed)) {
			CHIP_DBG(KERN_ERR "\tframe_flushed error\n");
			x->tx_frame_flushed++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		if (unlikely(p->des01.etx.loss_carrier)) {
			CHIP_DBG(KERN_ERR "\tloss_carrier error\n");
			x->tx_losscarrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.no_carrier)) {
			CHIP_DBG(KERN_ERR "\tno_carrier error\n");
			x->tx_carrier++;
			stats->tx_carrier_errors++;
		}
		if (unlikely(p->des01.etx.late_collision)) {
			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_collisions)) {
			CHIP_DBG(KERN_ERR "\texcessive_collisions\n");
			stats->collisions += p->des01.etx.collision_count;
		}
		if (unlikely(p->des01.etx.excessive_deferral)) {
			CHIP_DBG(KERN_INFO "\texcessive tx_deferral\n");
			x->tx_deferred++;
		}

		if (unlikely(p->des01.etx.underflow_error)) {
			CHIP_DBG(KERN_ERR "\tunderflow error\n");
			dwmac_dma_flush_tx_fifo(ioaddr);
			x->tx_underflow++;
		}

		if (unlikely(p->des01.etx.ip_header_error)) {
			CHIP_DBG(KERN_ERR "\tTX IP header csum error\n");
			x->tx_ip_header_error++;
		}

		if (unlikely(p->des01.etx.payload_error)) {
			CHIP_DBG(KERN_ERR "\tAddr/Payload csum error\n");
			x->tx_payload_error++;
			dwmac_dma_flush_tx_fifo(ioaddr);
		}

		ret = -1;
	}

	if (unlikely(p->des01.etx.deferred)) {
		CHIP_DBG(KERN_INFO "GMAC TX status: tx deferred\n");
		x->tx_deferred++;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.etx.vlan_frame) {
		CHIP_DBG(KERN_INFO "GMAC TX status: VLAN frame\n");
		x->tx_vlan++;
	}
#endif

	return ret;
}

static int enh_desc_get_tx_len(struct dma_desc *p)
{
	return p->des01.etx.buffer1_size;
}

static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
{
	int ret = good_frame;
	u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;

	/* bits 5 7 0 | Frame status
	 * ----------------------------------------------------------
	 *      0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
	 *      1 0 0 | IPv4/6 No CSUM errors.
	 *      1 0 1 | IPv4/6 CSUM payload error
	 *      1 1 0 | IPv4/6 CSUM IP header error
	 *      1 1 1 | IPv4/6 IP payload and header errors
	 *      0 0 1 | IPv4/6 unsupported IP payload
	 *      0 1 1 | COE bypassed... no IPv4/6 frame
	 *      0 1 0 | Reserved.
	 */
	if (status == 0x0) {
		CHIP_DBG(KERN_INFO "RX Des0 status: IEEE 802.3 Type frame.\n");
		ret = llc_snap;
	} else if (status == 0x4) {
		CHIP_DBG(KERN_INFO "RX Des0 status: IPv4/6 No CSUM errors.\n");
		ret = good_frame;
	} else if (status == 0x5) {
		CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x6) {
		CHIP_DBG(KERN_ERR "RX Des0 status: IPv4/6 Header Error.\n");
		ret = csum_none;
	} else if (status == 0x7) {
		CHIP_DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 Header and Payload Error.\n");
		ret = csum_none;
	} else if (status == 0x1) {
		CHIP_DBG(KERN_ERR
		    "RX Des0 status: IPv4/6 unsupported IP payload.\n");
		ret = discard_frame;
	} else if (status == 0x3) {
		CHIP_DBG(KERN_ERR "RX Des0 status: No IPv4, IPv6 frame.\n");
		ret = discard_frame;
	}
	return ret;
}
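
/*
 * Worked example (illustration only, not driver code): for an IPv4 TCP
 * frame received with correct IP header and payload checksums, RDES0
 * reports frame_type = 1, ipc_csum_error = 0 and payload_err = 0, so
 * status = (1 << 2) = 0x4 and enh_desc_coe_rdes0() returns good_frame.
 * A consumer such as the stmmac rx path can then skip the software
 * checksum; "skb" below is a hypothetical name used only to show the
 * idea:
 *
 *	if (status == good_frame)
 *		skb->ip_summed = CHECKSUM_UNNECESSARY;
 *	else if (status == csum_none)
 *		skb->ip_summed = CHECKSUM_NONE;	 (let the stack verify)
 */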

static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
				    struct dma_extended_desc *p)
{
	if (unlikely(p->basic.des01.erx.rx_mac_addr)) {
		if (p->des4.erx.ip_hdr_err)
			x->ip_hdr_err++;
		if (p->des4.erx.ip_payload_err)
			x->ip_payload_err++;
		if (p->des4.erx.ip_csum_bypassed)
			x->ip_csum_bypassed++;
		if (p->des4.erx.ipv4_pkt_rcvd)
			x->ipv4_pkt_rcvd++;
		if (p->des4.erx.ipv6_pkt_rcvd)
			x->ipv6_pkt_rcvd++;
		if (p->des4.erx.msg_type == RDES_EXT_SYNC)
			x->rx_msg_type_sync++;
		else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP)
			x->rx_msg_type_follow_up++;
		else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
			x->rx_msg_type_delay_req++;
		else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
			x->rx_msg_type_delay_resp++;
		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
			x->rx_msg_type_pdelay_req++;
		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
			x->rx_msg_type_pdelay_resp++;
		else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP)
			x->rx_msg_type_pdelay_follow_up++;
		else
			x->rx_msg_type_ext_no_ptp++;
		if (p->des4.erx.ptp_frame_type)
			x->ptp_frame_type++;
		if (p->des4.erx.ptp_ver)
			x->ptp_ver++;
		if (p->des4.erx.timestamp_dropped)
			x->timestamp_dropped++;
		if (p->des4.erx.av_pkt_rcvd)
			x->av_pkt_rcvd++;
		if (p->des4.erx.av_tagged_pkt_rcvd)
			x->av_tagged_pkt_rcvd++;
		if (p->des4.erx.vlan_tag_priority_val)
			x->vlan_tag_priority_val++;
		if (p->des4.erx.l3_filter_match)
			x->l3_filter_match++;
		if (p->des4.erx.l4_filter_match)
			x->l4_filter_match++;
		if (p->des4.erx.l3_l4_filter_no_match)
			x->l3_l4_filter_no_match++;
	}
}
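
/*
 * Illustrative call (assumed names, not part of this file): with the
 * extended descriptors enabled, a received PTP Sync message sets
 * msg_type in RDES4 to RDES_EXT_SYNC, so the walk above increments
 * x->rx_msg_type_sync.  A hypothetical rx loop would only need:
 *
 *	enh_desc_get_ext_status(&ndev->stats, &priv->xstats, ep);
 *
 * where "ndev", "priv" and "ep" (the extended descriptor taken from
 * the rx ring) are placeholders for the real stmmac rx path context.
 */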

static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
				  struct dma_desc *p)
{
	int ret = good_frame;
	struct net_device_stats *stats = (struct net_device_stats *)data;

	if (unlikely(p->des01.erx.error_summary)) {
		CHIP_DBG(KERN_ERR "GMAC RX Error Summary 0x%08x\n",
			 p->des01.erx);
		if (unlikely(p->des01.erx.descriptor_error)) {
			CHIP_DBG(KERN_ERR "\tdescriptor error\n");
			x->rx_desc++;
			stats->rx_length_errors++;
		}
		if (unlikely(p->des01.erx.overflow_error)) {
			CHIP_DBG(KERN_ERR "\toverflow error\n");
			x->rx_gmac_overflow++;
		}

		if (unlikely(p->des01.erx.ipc_csum_error))
			CHIP_DBG(KERN_ERR "\tIPC Csum Error/Giant frame\n");

		if (unlikely(p->des01.erx.late_collision)) {
			CHIP_DBG(KERN_ERR "\tlate_collision error\n");
			stats->collisions++;
		}
		if (unlikely(p->des01.erx.receive_watchdog)) {
			CHIP_DBG(KERN_ERR "\treceive_watchdog error\n");
			x->rx_watchdog++;
		}
		if (unlikely(p->des01.erx.error_gmii)) {
			CHIP_DBG(KERN_ERR "\tReceive Error\n");
			x->rx_mii++;
		}
		if (unlikely(p->des01.erx.crc_error)) {
			CHIP_DBG(KERN_ERR "\tCRC error\n");
			x->rx_crc++;
			stats->rx_crc_errors++;
		}
		ret = discard_frame;
	}

	/* After a payload csum error, the ES bit is set.
	 * This does not match the information reported in the databook.
	 * At any rate, we need to understand whether the hardware CSUM
	 * computation succeeded and report this to the upper layers. */
	ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error,
		p->des01.erx.frame_type, p->des01.erx.rx_mac_addr);

	if (unlikely(p->des01.erx.dribbling)) {
		CHIP_DBG(KERN_ERR "GMAC RX: dribbling error\n");
		x->dribbling_bit++;
	}
	if (unlikely(p->des01.erx.sa_filter_fail)) {
		CHIP_DBG(KERN_ERR "GMAC RX: Source Address filter fail\n");
		x->sa_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.da_filter_fail)) {
		CHIP_DBG(KERN_ERR "GMAC RX: Dest Address filter fail\n");
		x->da_rx_filter_fail++;
		ret = discard_frame;
	}
	if (unlikely(p->des01.erx.length_error)) {
		CHIP_DBG(KERN_ERR "GMAC RX: length_error error\n");
		x->rx_length++;
		ret = discard_frame;
	}
#ifdef STMMAC_VLAN_TAG_USED
	if (p->des01.erx.vlan_tag) {
		CHIP_DBG(KERN_INFO "GMAC RX: VLAN frame tagged\n");
		x->rx_vlan++;
	}
#endif

	return ret;
}

static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
				  int mode, int end)
{
	p->des01.erx.own = 1;
	p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1;

	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_rx_set_on_chain(p, end);
	else
		ehn_desc_rx_set_on_ring(p, end);

	if (disable_rx_ic)
		p->des01.erx.disable_ic = 1;
}

static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
{
	p->des01.etx.own = 0;
	if (mode == STMMAC_CHAIN_MODE)
		ehn_desc_tx_set_on_chain(p, end);
	else
		ehn_desc_tx_set_on_ring(p, end);
}

static int enh_desc_get_tx_owner(struct dma_desc *p)
{
	return p->des01.etx.own;
}

static int enh_desc_get_rx_owner(struct dma_desc *p)
{
	return p->des01.erx.own;
}

static void enh_desc_set_tx_owner(struct dma_desc *p)
{
	p->des01.etx.own = 1;
}

static void enh_desc_set_rx_owner(struct dma_desc *p)
{
	p->des01.erx.own = 1;
}

static int enh_desc_get_tx_ls(struct dma_desc *p)
{
	return p->des01.etx.last_segment;
}

static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
{
	int ter = p->des01.etx.end_ring;

	memset(p, 0, offsetof(struct dma_desc, des2));
	if (mode == STMMAC_CHAIN_MODE)
		enh_desc_end_tx_desc_on_chain(p, ter);
	else
		enh_desc_end_tx_desc_on_ring(p, ter);
}
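
/*
 * Sketch (illustration only) of the order in which a caller is
 * expected to drive the TX helpers in this file for a single-buffer
 * frame; the wmb() and the variable names are assumptions, and the
 * real sequence lives in the stmmac xmit path:
 *
 *	enh_desc_prepare_tx_desc(p, 1, skb->len, csum_ok, mode);
 *	enh_desc_close_tx_desc(p);	 (last segment + completion irq)
 *	wmb();				 (descriptor must hit memory first)
 *	enh_desc_set_tx_owner(p);	 (hand the descriptor to the DMA)
 */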

static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
				     int csum_flag, int mode)
{
	p->des01.etx.first_segment = is_fs;

	if (mode == STMMAC_CHAIN_MODE)
		enh_set_tx_desc_len_on_chain(p, len);
	else
		enh_set_tx_desc_len_on_ring(p, len);

	if (likely(csum_flag))
		p->des01.etx.checksum_insertion = cic_full;
}

static void enh_desc_clear_tx_ic(struct dma_desc *p)
{
	p->des01.etx.interrupt = 0;
}

static void enh_desc_close_tx_desc(struct dma_desc *p)
{
	p->des01.etx.last_segment = 1;
	p->des01.etx.interrupt = 1;
}

static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
{
	/* The type-1 checksum offload engines append the checksum at
	 * the end of the frame, and those two checksum bytes are
	 * included in the reported frame length.
	 * Adjust the frame length accordingly for type-1 checksum
	 * offload engines. */
	if (rx_coe_type == STMMAC_RX_COE_TYPE1)
		return p->des01.erx.frame_length - 2;
	else
		return p->des01.erx.frame_length;
}

static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
{
	p->des01.etx.time_stamp_enable = 1;
}

static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
{
	return p->des01.etx.time_stamp_status;
}

static u64 enh_desc_get_timestamp(void *desc, u32 ats)
{
	u64 ns;

	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		ns = p->des6;
		/* des7 holds the seconds part of the time stamp;
		 * convert it to nanoseconds and add the sub-second
		 * part from des6. */
		ns += p->des7 * 1000000000ULL;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		ns = p->des2;
		ns += p->des3 * 1000000000ULL;
	}

	return ns;
}

static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats)
{
	if (ats) {
		struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
		/* With the advanced time stamp feature, this RDES0 bit
		 * also acts as the "timestamp available" flag. */
		return p->basic.des01.erx.ipc_csum_error;
	} else {
		struct dma_desc *p = (struct dma_desc *)desc;
		if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
			/* timestamp is corrupted, hence don't store it */
			return 0;
		else
			return 1;
	}
}

const struct stmmac_desc_ops enh_desc_ops = {
	.tx_status = enh_desc_get_tx_status,
	.rx_status = enh_desc_get_rx_status,
	.get_tx_len = enh_desc_get_tx_len,
	.init_rx_desc = enh_desc_init_rx_desc,
	.init_tx_desc = enh_desc_init_tx_desc,
	.get_tx_owner = enh_desc_get_tx_owner,
	.get_rx_owner = enh_desc_get_rx_owner,
	.release_tx_desc = enh_desc_release_tx_desc,
	.prepare_tx_desc = enh_desc_prepare_tx_desc,
	.clear_tx_ic = enh_desc_clear_tx_ic,
	.close_tx_desc = enh_desc_close_tx_desc,
	.get_tx_ls = enh_desc_get_tx_ls,
	.set_tx_owner = enh_desc_set_tx_owner,
	.set_rx_owner = enh_desc_set_rx_owner,
	.get_rx_frame_len = enh_desc_get_rx_frame_len,
	.rx_extended_status = enh_desc_get_ext_status,
	.enable_tx_timestamp = enh_desc_enable_tx_timestamp,
	.get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
	.get_timestamp = enh_desc_get_timestamp,
	.get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
};
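
/*
 * Usage sketch (assumed hook-up, for illustration): the stmmac core
 * selects this table when the platform advertises enhanced
 * descriptors, then drives the descriptor library through it, e.g.:
 *
 *	if (priv->plat->enh_desc)
 *		priv->hw->desc = &enh_desc_ops;
 *	...
 *	priv->hw->desc->init_tx_desc(p, priv->mode, last);
 *
 * "priv->hw->desc" and "last" are placeholder names; the exact
 * assignment and call sites live in the stmmac core, not in this file.
 */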