1 /******************************************************************************* 2 This contains the functions to handle the enhanced descriptors. 3 4 Copyright (C) 2007-2009 STMicroelectronics Ltd 5 6 This program is free software; you can redistribute it and/or modify it 7 under the terms and conditions of the GNU General Public License, 8 version 2, as published by the Free Software Foundation. 9 10 This program is distributed in the hope it will be useful, but WITHOUT 11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 more details. 14 15 You should have received a copy of the GNU General Public License along with 16 this program; if not, write to the Free Software Foundation, Inc., 17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 18 19 The full GNU General Public License is included in this distribution in 20 the file called "COPYING". 21 22 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 23 *******************************************************************************/ 24 25 #include <linux/stmmac.h> 26 #include "common.h" 27 #include "descs_com.h" 28 29 static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x, 30 struct dma_desc *p, void __iomem *ioaddr) 31 { 32 int ret = 0; 33 struct net_device_stats *stats = (struct net_device_stats *)data; 34 35 if (unlikely(p->des01.etx.error_summary)) { 36 if (unlikely(p->des01.etx.jabber_timeout)) 37 x->tx_jabber++; 38 39 if (unlikely(p->des01.etx.frame_flushed)) { 40 x->tx_frame_flushed++; 41 dwmac_dma_flush_tx_fifo(ioaddr); 42 } 43 44 if (unlikely(p->des01.etx.loss_carrier)) { 45 x->tx_losscarrier++; 46 stats->tx_carrier_errors++; 47 } 48 if (unlikely(p->des01.etx.no_carrier)) { 49 x->tx_carrier++; 50 stats->tx_carrier_errors++; 51 } 52 if (unlikely(p->des01.etx.late_collision)) 53 stats->collisions += p->des01.etx.collision_count; 54 55 if (unlikely(p->des01.etx.excessive_collisions)) 56 stats->collisions += p->des01.etx.collision_count; 57 58 if (unlikely(p->des01.etx.excessive_deferral)) 59 x->tx_deferred++; 60 61 if (unlikely(p->des01.etx.underflow_error)) { 62 dwmac_dma_flush_tx_fifo(ioaddr); 63 x->tx_underflow++; 64 } 65 66 if (unlikely(p->des01.etx.ip_header_error)) 67 x->tx_ip_header_error++; 68 69 if (unlikely(p->des01.etx.payload_error)) { 70 x->tx_payload_error++; 71 dwmac_dma_flush_tx_fifo(ioaddr); 72 } 73 74 ret = -1; 75 } 76 77 if (unlikely(p->des01.etx.deferred)) 78 x->tx_deferred++; 79 80 #ifdef STMMAC_VLAN_TAG_USED 81 if (p->des01.etx.vlan_frame) 82 x->tx_vlan++; 83 #endif 84 85 return ret; 86 } 87 88 static int enh_desc_get_tx_len(struct dma_desc *p) 89 { 90 return p->des01.etx.buffer1_size; 91 } 92 93 static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err) 94 { 95 int ret = good_frame; 96 u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7; 97 98 /* bits 5 7 0 | Frame status 99 * ---------------------------------------------------------- 100 * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octects) 101 * 1 0 0 | IPv4/6 No CSUM errorS. 102 * 1 0 1 | IPv4/6 CSUM PAYLOAD error 103 * 1 1 0 | IPv4/6 CSUM IP HR error 104 * 1 1 1 | IPv4/6 IP PAYLOAD AND HEADER errorS 105 * 0 0 1 | IPv4/6 unsupported IP PAYLOAD 106 * 0 1 1 | COE bypassed.. no IPv4/6 frame 107 * 0 1 0 | Reserved. 108 */ 109 if (status == 0x0) 110 ret = llc_snap; 111 else if (status == 0x4) 112 ret = good_frame; 113 else if (status == 0x5) 114 ret = csum_none; 115 else if (status == 0x6) 116 ret = csum_none; 117 else if (status == 0x7) 118 ret = csum_none; 119 else if (status == 0x1) 120 ret = discard_frame; 121 else if (status == 0x3) 122 ret = discard_frame; 123 return ret; 124 } 125 126 static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x, 127 struct dma_extended_desc *p) 128 { 129 if (unlikely(p->basic.des01.erx.rx_mac_addr)) { 130 if (p->des4.erx.ip_hdr_err) 131 x->ip_hdr_err++; 132 if (p->des4.erx.ip_payload_err) 133 x->ip_payload_err++; 134 if (p->des4.erx.ip_csum_bypassed) 135 x->ip_csum_bypassed++; 136 if (p->des4.erx.ipv4_pkt_rcvd) 137 x->ipv4_pkt_rcvd++; 138 if (p->des4.erx.ipv6_pkt_rcvd) 139 x->ipv6_pkt_rcvd++; 140 if (p->des4.erx.msg_type == RDES_EXT_SYNC) 141 x->rx_msg_type_sync++; 142 else if (p->des4.erx.msg_type == RDES_EXT_FOLLOW_UP) 143 x->rx_msg_type_follow_up++; 144 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ) 145 x->rx_msg_type_delay_req++; 146 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP) 147 x->rx_msg_type_delay_resp++; 148 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ) 149 x->rx_msg_type_pdelay_req++; 150 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP) 151 x->rx_msg_type_pdelay_resp++; 152 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_FOLLOW_UP) 153 x->rx_msg_type_pdelay_follow_up++; 154 else 155 x->rx_msg_type_ext_no_ptp++; 156 if (p->des4.erx.ptp_frame_type) 157 x->ptp_frame_type++; 158 if (p->des4.erx.ptp_ver) 159 x->ptp_ver++; 160 if (p->des4.erx.timestamp_dropped) 161 x->timestamp_dropped++; 162 if (p->des4.erx.av_pkt_rcvd) 163 x->av_pkt_rcvd++; 164 if (p->des4.erx.av_tagged_pkt_rcvd) 165 x->av_tagged_pkt_rcvd++; 166 if (p->des4.erx.vlan_tag_priority_val) 167 x->vlan_tag_priority_val++; 168 if (p->des4.erx.l3_filter_match) 169 x->l3_filter_match++; 170 if (p->des4.erx.l4_filter_match) 171 x->l4_filter_match++; 172 if (p->des4.erx.l3_l4_filter_no_match) 173 x->l3_l4_filter_no_match++; 174 } 175 } 176 177 static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x, 178 struct dma_desc *p) 179 { 180 int ret = good_frame; 181 struct net_device_stats *stats = (struct net_device_stats *)data; 182 183 if (unlikely(p->des01.erx.error_summary)) { 184 if (unlikely(p->des01.erx.descriptor_error)) { 185 x->rx_desc++; 186 stats->rx_length_errors++; 187 } 188 if (unlikely(p->des01.erx.overflow_error)) 189 x->rx_gmac_overflow++; 190 191 if (unlikely(p->des01.erx.ipc_csum_error)) 192 pr_err("\tIPC Csum Error/Giant frame\n"); 193 194 if (unlikely(p->des01.erx.late_collision)) { 195 stats->collisions++; 196 } 197 if (unlikely(p->des01.erx.receive_watchdog)) 198 x->rx_watchdog++; 199 200 if (unlikely(p->des01.erx.error_gmii)) 201 x->rx_mii++; 202 203 if (unlikely(p->des01.erx.crc_error)) { 204 x->rx_crc++; 205 stats->rx_crc_errors++; 206 } 207 ret = discard_frame; 208 } 209 210 /* After a payload csum error, the ES bit is set. 211 * It doesn't match with the information reported into the databook. 212 * At any rate, we need to understand if the CSUM hw computation is ok 213 * and report this info to the upper layers. */ 214 ret = enh_desc_coe_rdes0(p->des01.erx.ipc_csum_error, 215 p->des01.erx.frame_type, p->des01.erx.rx_mac_addr); 216 217 if (unlikely(p->des01.erx.dribbling)) 218 x->dribbling_bit++; 219 220 if (unlikely(p->des01.erx.sa_filter_fail)) { 221 x->sa_rx_filter_fail++; 222 ret = discard_frame; 223 } 224 if (unlikely(p->des01.erx.da_filter_fail)) { 225 x->da_rx_filter_fail++; 226 ret = discard_frame; 227 } 228 if (unlikely(p->des01.erx.length_error)) { 229 x->rx_length++; 230 ret = discard_frame; 231 } 232 #ifdef STMMAC_VLAN_TAG_USED 233 if (p->des01.erx.vlan_tag) 234 x->rx_vlan++; 235 #endif 236 237 return ret; 238 } 239 240 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, 241 int mode, int end) 242 { 243 p->des01.erx.own = 1; 244 p->des01.erx.buffer1_size = BUF_SIZE_8KiB - 1; 245 246 if (mode == STMMAC_CHAIN_MODE) 247 ehn_desc_rx_set_on_chain(p, end); 248 else 249 ehn_desc_rx_set_on_ring(p, end); 250 251 if (disable_rx_ic) 252 p->des01.erx.disable_ic = 1; 253 } 254 255 static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end) 256 { 257 p->des01.etx.own = 0; 258 if (mode == STMMAC_CHAIN_MODE) 259 ehn_desc_tx_set_on_chain(p, end); 260 else 261 ehn_desc_tx_set_on_ring(p, end); 262 } 263 264 static int enh_desc_get_tx_owner(struct dma_desc *p) 265 { 266 return p->des01.etx.own; 267 } 268 269 static int enh_desc_get_rx_owner(struct dma_desc *p) 270 { 271 return p->des01.erx.own; 272 } 273 274 static void enh_desc_set_tx_owner(struct dma_desc *p) 275 { 276 p->des01.etx.own = 1; 277 } 278 279 static void enh_desc_set_rx_owner(struct dma_desc *p) 280 { 281 p->des01.erx.own = 1; 282 } 283 284 static int enh_desc_get_tx_ls(struct dma_desc *p) 285 { 286 return p->des01.etx.last_segment; 287 } 288 289 static void enh_desc_release_tx_desc(struct dma_desc *p, int mode) 290 { 291 int ter = p->des01.etx.end_ring; 292 293 memset(p, 0, offsetof(struct dma_desc, des2)); 294 if (mode == STMMAC_CHAIN_MODE) 295 enh_desc_end_tx_desc_on_chain(p, ter); 296 else 297 enh_desc_end_tx_desc_on_ring(p, ter); 298 } 299 300 static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len, 301 int csum_flag, int mode) 302 { 303 p->des01.etx.first_segment = is_fs; 304 305 if (mode == STMMAC_CHAIN_MODE) 306 enh_set_tx_desc_len_on_chain(p, len); 307 else 308 enh_set_tx_desc_len_on_ring(p, len); 309 310 if (likely(csum_flag)) 311 p->des01.etx.checksum_insertion = cic_full; 312 } 313 314 static void enh_desc_clear_tx_ic(struct dma_desc *p) 315 { 316 p->des01.etx.interrupt = 0; 317 } 318 319 static void enh_desc_close_tx_desc(struct dma_desc *p) 320 { 321 p->des01.etx.last_segment = 1; 322 p->des01.etx.interrupt = 1; 323 } 324 325 static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type) 326 { 327 /* The type-1 checksum offload engines append the checksum at 328 * the end of frame and the two bytes of checksum are added in 329 * the length. 330 * Adjust for that in the framelen for type-1 checksum offload 331 * engines. */ 332 if (rx_coe_type == STMMAC_RX_COE_TYPE1) 333 return p->des01.erx.frame_length - 2; 334 else 335 return p->des01.erx.frame_length; 336 } 337 338 static void enh_desc_enable_tx_timestamp(struct dma_desc *p) 339 { 340 p->des01.etx.time_stamp_enable = 1; 341 } 342 343 static int enh_desc_get_tx_timestamp_status(struct dma_desc *p) 344 { 345 return p->des01.etx.time_stamp_status; 346 } 347 348 static u64 enh_desc_get_timestamp(void *desc, u32 ats) 349 { 350 u64 ns; 351 352 if (ats) { 353 struct dma_extended_desc *p = (struct dma_extended_desc *)desc; 354 ns = p->des6; 355 /* convert high/sec time stamp value to nanosecond */ 356 ns += p->des7 * 1000000000ULL; 357 } else { 358 struct dma_desc *p = (struct dma_desc *)desc; 359 ns = p->des2; 360 ns += p->des3 * 1000000000ULL; 361 } 362 363 return ns; 364 } 365 366 static int enh_desc_get_rx_timestamp_status(void *desc, u32 ats) 367 { 368 if (ats) { 369 struct dma_extended_desc *p = (struct dma_extended_desc *)desc; 370 return p->basic.des01.erx.ipc_csum_error; 371 } else { 372 struct dma_desc *p = (struct dma_desc *)desc; 373 if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff)) 374 /* timestamp is corrupted, hence don't store it */ 375 return 0; 376 else 377 return 1; 378 } 379 } 380 381 const struct stmmac_desc_ops enh_desc_ops = { 382 .tx_status = enh_desc_get_tx_status, 383 .rx_status = enh_desc_get_rx_status, 384 .get_tx_len = enh_desc_get_tx_len, 385 .init_rx_desc = enh_desc_init_rx_desc, 386 .init_tx_desc = enh_desc_init_tx_desc, 387 .get_tx_owner = enh_desc_get_tx_owner, 388 .get_rx_owner = enh_desc_get_rx_owner, 389 .release_tx_desc = enh_desc_release_tx_desc, 390 .prepare_tx_desc = enh_desc_prepare_tx_desc, 391 .clear_tx_ic = enh_desc_clear_tx_ic, 392 .close_tx_desc = enh_desc_close_tx_desc, 393 .get_tx_ls = enh_desc_get_tx_ls, 394 .set_tx_owner = enh_desc_set_tx_owner, 395 .set_rx_owner = enh_desc_set_rx_owner, 396 .get_rx_frame_len = enh_desc_get_rx_frame_len, 397 .rx_extended_status = enh_desc_get_ext_status, 398 .enable_tx_timestamp = enh_desc_enable_tx_timestamp, 399 .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status, 400 .get_timestamp = enh_desc_get_timestamp, 401 .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status, 402 }; 403