--- bgmac.c (85a3685852d9ac7d92be9d824533c915a4597fa4)
+++ bgmac.c (b38c83dd08665a93e439c4ffd9eef31bc098a6ea)
 /*
  * Driver for (BCM4706)? GBit MAC core on BCMA bus.
  *
  * Copyright (C) 2012 Rafał Miłecki <zajec5@gmail.com>
  *
  * Licensed under the GNU/GPL. See COPYING for details.
  */

 #include "bgmac.h"

 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/delay.h>
 #include <linux/etherdevice.h>
 #include <linux/mii.h>
 #include <linux/phy.h>
+#include <linux/phy_fixed.h>
 #include <linux/interrupt.h>
 #include <linux/dma-mapping.h>
 #include <bcm47xx_nvram.h>

 static const struct bcma_device_id bgmac_bcma_tbl[] = {
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_4706_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_MAC_GBIT, BCMA_ANY_REV, BCMA_ANY_CLASS),
	{},

--- 84 unchanged lines hidden ---

		ctl &= ~BGMAC_DMA_TX_PT_MASK;
		ctl |= BGMAC_DMA_TX_PT_8 << BGMAC_DMA_TX_PT_SHIFT;
	}
	ctl |= BGMAC_DMA_TX_ENABLE;
	ctl |= BGMAC_DMA_TX_PARITY_DISABLE;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_CTL, ctl);
 }

+static void
+bgmac_dma_tx_add_buf(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+		     int i, int len, u32 ctl0)
+{
+	struct bgmac_slot_info *slot;
+	struct bgmac_dma_desc *dma_desc;
+	u32 ctl1;
+
+	if (i == ring->num_slots - 1)
+		ctl0 |= BGMAC_DESC_CTL0_EOT;
+
+	ctl1 = len & BGMAC_DESC_CTL1_LEN;
+
+	slot = &ring->slots[i];
+	dma_desc = &ring->cpu_base[i];
+	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
+	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
+	dma_desc->ctl0 = cpu_to_le32(ctl0);
+	dma_desc->ctl1 = cpu_to_le32(ctl1);
+}
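The new bgmac_dma_tx_add_buf() helper shares descriptor setup between the linear head and each fragment. Note that BGMAC_DESC_CTL0_EOT marks the physically last ring slot and is independent of the frame's SOF/EOF markers, so a frame that wraps the ring carries EOT mid-frame. A standalone sketch of the flag logic (the bit positions mirror the usual bgmac.h values but are reproduced here only for illustration, and desc_flags() is a hypothetical helper):

```c
#include <stdio.h>
#include <stdint.h>

#define DESC_CTL0_SOF (1u << 31)	/* start of frame */
#define DESC_CTL0_EOF (1u << 30)	/* end of frame */
#define DESC_CTL0_IOC (1u << 29)	/* interrupt on completion */
#define DESC_CTL0_EOT (1u << 28)	/* end of descriptor table (ring wrap) */
#define NUM_SLOTS 8

static uint32_t desc_flags(int slot, int nr_frags, int buf_idx)
{
	uint32_t ctl0 = 0;

	if (buf_idx == 0)
		ctl0 |= DESC_CTL0_SOF;
	if (buf_idx == nr_frags)	/* last buffer of the frame */
		ctl0 |= DESC_CTL0_EOF | DESC_CTL0_IOC;
	if (slot == NUM_SLOTS - 1)	/* physically last slot, frame-independent */
		ctl0 |= DESC_CTL0_EOT;
	return ctl0;
}

int main(void)
{
	/* A 3-buffer frame (head + 2 frags) starting at slot 6 wraps the
	 * ring: slot 7 carries EOT even though it is mid-frame.
	 */
	for (int f = 0; f <= 2; f++)
		printf("slot %d: ctl0=%08x\n", (6 + f) % NUM_SLOTS,
		       (unsigned)desc_flags((6 + f) % NUM_SLOTS, 2, f));
	return 0;
}
```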
|
 static netdev_tx_t bgmac_dma_tx_add(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring,
				    struct sk_buff *skb)
 {
	struct device *dma_dev = bgmac->core->dma_dev;
	struct net_device *net_dev = bgmac->net_dev;
-	struct bgmac_dma_desc *dma_desc;
-	struct bgmac_slot_info *slot;
-	u32 ctl0, ctl1;
-	int free_slots;
+	int index = ring->end % BGMAC_TX_RING_SLOTS;
+	struct bgmac_slot_info *slot = &ring->slots[index];
+	int nr_frags;
+	u32 flags;
+	int i;

	if (skb->len > BGMAC_DESC_CTL1_LEN) {
		bgmac_err(bgmac, "Too long skb (%d)\n", skb->len);
-		goto err_stop_drop;
+		goto err_drop;
	}

-	if (ring->start <= ring->end)
-		free_slots = ring->start - ring->end + BGMAC_TX_RING_SLOTS;
-	else
-		free_slots = ring->start - ring->end;
-	if (free_slots == 1) {
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		skb_checksum_help(skb);
+
+	nr_frags = skb_shinfo(skb)->nr_frags;
+
+	/* ring->end - ring->start will return the number of valid slots,
+	 * even when ring->end overflows
+	 */
+	if (ring->end - ring->start + nr_frags + 1 >= BGMAC_TX_RING_SLOTS) {
		bgmac_err(bgmac, "TX ring is full, queue should be stopped!\n");
		netif_stop_queue(net_dev);
		return NETDEV_TX_BUSY;
	}
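The rewritten fullness check relies on ring->start and ring->end being free-running counters: with unsigned arithmetic, end - start stays equal to the number of occupied slots even across numeric overflow, and `end % BGMAC_TX_RING_SLOTS` keeps giving consistent hardware slot indices provided the slot count is a power of two (it is). A standalone illustration:

```c
#include <stdio.h>
#include <stdint.h>

#define TX_RING_SLOTS 128	/* must divide 2^32 for the modulo trick to hold */

int main(void)
{
	uint32_t start = 0xfffffff0u;	/* counter about to overflow */
	uint32_t end = start + 20;	/* wraps to 0x00000004 */

	printf("occupied = %u\n", (unsigned)(end - start));	/* 20, not negative */
	printf("hw slot  = %u\n", (unsigned)(end % TX_RING_SLOTS));
	return 0;
}
```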
-	slot = &ring->slots[ring->end];
-	slot->skb = skb;
-	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb->len,
+	slot->dma_addr = dma_map_single(dma_dev, skb->data, skb_headlen(skb),
					DMA_TO_DEVICE);
-	if (dma_mapping_error(dma_dev, slot->dma_addr)) {
-		bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
-			  ring->mmio_base);
-		goto err_stop_drop;
-	}
+	if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
+		goto err_dma_head;

-	ctl0 = BGMAC_DESC_CTL0_IOC | BGMAC_DESC_CTL0_SOF | BGMAC_DESC_CTL0_EOF;
-	if (ring->end == ring->num_slots - 1)
-		ctl0 |= BGMAC_DESC_CTL0_EOT;
-	ctl1 = skb->len & BGMAC_DESC_CTL1_LEN;
+	flags = BGMAC_DESC_CTL0_SOF;
+	if (!nr_frags)
+		flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;

-	dma_desc = ring->cpu_base;
-	dma_desc += ring->end;
-	dma_desc->addr_low = cpu_to_le32(lower_32_bits(slot->dma_addr));
-	dma_desc->addr_high = cpu_to_le32(upper_32_bits(slot->dma_addr));
-	dma_desc->ctl0 = cpu_to_le32(ctl0);
-	dma_desc->ctl1 = cpu_to_le32(ctl1);
+	bgmac_dma_tx_add_buf(bgmac, ring, index, skb_headlen(skb), flags);
+	flags = 0;

+	for (i = 0; i < nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		int len = skb_frag_size(frag);
+
+		index = (index + 1) % BGMAC_TX_RING_SLOTS;
+		slot = &ring->slots[index];
+		slot->dma_addr = skb_frag_dma_map(dma_dev, frag, 0,
+						  len, DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(dma_dev, slot->dma_addr)))
+			goto err_dma;
+
+		if (i == nr_frags - 1)
+			flags |= BGMAC_DESC_CTL0_EOF | BGMAC_DESC_CTL0_IOC;
+
+		bgmac_dma_tx_add_buf(bgmac, ring, index, len, flags);
+	}
+
+	slot->skb = skb;
+	ring->end += nr_frags + 1;
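With scatter/gather, the skb pointer is stored only once, on the slot that received the frame's last buffer (slot points there when `slot->skb = skb` runs), while every descriptor records its own buffer length in ctl1. That is what lets the reclaim path unmap slot by slot without consulting the skb. A standalone model of that bookkeeping (simplified stand-in types, illustrative only):

```c
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define SLOTS 8

/* skb stays NULL except on the frame's last (EOF) slot */
struct slot { const char *skb; uint32_t len; };

int main(void)
{
	struct slot ring[SLOTS];

	memset(ring, 0, sizeof(ring));

	/* "Map" a 3-buffer frame into slots 2..4 */
	ring[2].len = 1500;	/* head: reclaimed with dma_unmap_single() */
	ring[3].len = 800;	/* frag: reclaimed with dma_unmap_page() */
	ring[4].len = 300;
	ring[4].skb = "skb";	/* stored once, on the last buffer */

	/* Reclaim walks every slot; frees the skb only where it was stored */
	for (int i = 2; i <= 4; i++)
		printf("slot %d: unmap %u bytes%s\n", i, (unsigned)ring[i].len,
		       ring[i].skb ? ", free skb" : "");
	return 0;
}
```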
|
	netdev_sent_queue(net_dev, skb->len);

	wmb();

	/* Increase ring->end to point empty slot. We tell hardware the first
	 * slot it should *not* read.
	 */
-	if (++ring->end >= BGMAC_TX_RING_SLOTS)
-		ring->end = 0;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
		    ring->index_base +
-		    ring->end * sizeof(struct bgmac_dma_desc));
+		    (ring->end % BGMAC_TX_RING_SLOTS) *
+		    sizeof(struct bgmac_dma_desc));

-	/* Always keep one slot free to allow detecting bugged calls. */
-	if (--free_slots == 1)
+	if (ring->end - ring->start >= BGMAC_TX_RING_SLOTS - 8)
		netif_stop_queue(net_dev);

	return NETDEV_TX_OK;

-err_stop_drop:
-	netif_stop_queue(net_dev);
+err_dma:
+	dma_unmap_single(dma_dev, slot->dma_addr, skb_headlen(skb),
+			 DMA_TO_DEVICE);
+
+	while (i > 0) {
+		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
+		struct bgmac_slot_info *slot = &ring->slots[index];
+		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
+		int len = ctl1 & BGMAC_DESC_CTL1_LEN;
+
+		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
+	}
+
+err_dma_head:
+	bgmac_err(bgmac, "Mapping error of skb on ring 0x%X\n",
+		  ring->mmio_base);
+
+err_drop:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
 }

 /* Free transmitted packets */
 static void bgmac_dma_tx_free(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 {
	struct device *dma_dev = bgmac->core->dma_dev;
	int empty_slot;
	bool freed = false;
	unsigned bytes_compl = 0, pkts_compl = 0;

	/* The last slot that hardware didn't consume yet */
	empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot -= ring->index_base;
	empty_slot &= BGMAC_DMA_TX_STATDPTR;
	empty_slot /= sizeof(struct bgmac_dma_desc);
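One thing to flag in the err_dma unwind above: the `while (i > 0)` loop body never changes `i`, so a fragment-mapping failure would spin forever instead of unmapping the fragments already mapped. A minimal kernel-style correction (a sketch only, not a drop-in patch): since fragment f was mapped at slot `ring->end + 1 + f`, walking `i` down to 1 visits exactly the mapped fragments.

```c
	/* Sketch: same unwind as above, with the missing decrement added.
	 * Fragments 0..i-1 were mapped at slots ring->end+1 .. ring->end+i,
	 * so offsets i down to 1 unmap each of them exactly once.
	 */
	while (i > 0) {
		int index = (ring->end + i) % BGMAC_TX_RING_SLOTS;
		struct bgmac_slot_info *slot = &ring->slots[index];
		u32 ctl1 = le32_to_cpu(ring->cpu_base[index].ctl1);
		int len = ctl1 & BGMAC_DESC_CTL1_LEN;

		dma_unmap_page(dma_dev, slot->dma_addr, len, DMA_TO_DEVICE);
		i--;
	}
```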
-	while (ring->start != empty_slot) {
-		struct bgmac_slot_info *slot = &ring->slots[ring->start];
+	while (ring->start != ring->end) {
+		int slot_idx = ring->start % BGMAC_TX_RING_SLOTS;
+		struct bgmac_slot_info *slot = &ring->slots[slot_idx];
+		u32 ctl1;
+		int len;

-		if (slot->skb) {
+		if (slot_idx == empty_slot)
+			break;
+
+		ctl1 = le32_to_cpu(ring->cpu_base[slot_idx].ctl1);
+		len = ctl1 & BGMAC_DESC_CTL1_LEN;
+		if (ctl1 & BGMAC_DESC_CTL0_SOF)
			/* Unmap no longer used buffer */
-			dma_unmap_single(dma_dev, slot->dma_addr,
-					 slot->skb->len, DMA_TO_DEVICE);
-			slot->dma_addr = 0;
+			dma_unmap_single(dma_dev, slot->dma_addr, len,
+					 DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dma_dev, slot->dma_addr, len,
+				       DMA_TO_DEVICE);

+		if (slot->skb) {
			bytes_compl += slot->skb->len;
			pkts_compl++;

			/* Free memory! :) */
			dev_kfree_skb(slot->skb);
			slot->skb = NULL;
-		} else {
-			bgmac_err(bgmac, "Hardware reported transmission for empty TX ring slot %d! End of ring: %d\n",
-				  ring->start, ring->end);
		}

-		if (++ring->start >= BGMAC_TX_RING_SLOTS)
-			ring->start = 0;
+		slot->dma_addr = 0;
+		ring->start++;
		freed = true;
	}
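Two observations on the reclaim hunk above. First, `ctl1 & BGMAC_DESC_CTL0_SOF` tests a CTL0-named flag against the ctl1 word; that may be deliberate for this descriptor layout, but it is worth double-checking against bgmac.h. Second, the new early return on `!pkts_compl` (next hunk) keeps the byte-queue-limits (BQL) accounting and the queue-wake logic from running when nothing was reclaimed. The BQL contract is simply that bytes reported by netdev_sent_queue() on transmit are matched by netdev_completed_queue() on reclaim; a kernel-style sketch of the pairing (the function names here are hypothetical, the calls are the real API):

```c
static netdev_tx_t example_xmit(struct net_device *dev, struct sk_buff *skb)
{
	/* ...map buffers, fill descriptors... */
	netdev_sent_queue(dev, skb->len);	/* BQL: bytes now in flight */
	/* ...kick hardware... */
	return NETDEV_TX_OK;
}

static void example_tx_complete(struct net_device *dev,
				unsigned int pkts, unsigned int bytes)
{
	if (!pkts)
		return;		/* nothing reclaimed; skip BQL and wake logic */

	netdev_completed_queue(dev, pkts, bytes);	/* BQL: bytes drained */
	if (netif_queue_stopped(dev))
		netif_wake_queue(dev);
}
```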
+	if (!pkts_compl)
+		return;
+
	netdev_completed_queue(bgmac->net_dev, pkts_compl, bytes_compl);

-	if (freed && netif_queue_stopped(bgmac->net_dev))
+	if (netif_queue_stopped(bgmac->net_dev))
		netif_wake_queue(bgmac->net_dev);
 }

 static void bgmac_dma_rx_reset(struct bgmac *bgmac, struct bgmac_dma_ring *ring)
 {
	if (!ring->mmio_base)
		return;

--- 29 unchanged lines hidden ---

	ctl |= BGMAC_RX_FRAME_OFFSET << BGMAC_DMA_RX_FRAME_OFFSET_SHIFT;
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_CTL, ctl);
 }

 static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac,
				     struct bgmac_slot_info *slot)
 {
	struct device *dma_dev = bgmac->core->dma_dev;
-	struct sk_buff *skb;
	dma_addr_t dma_addr;
	struct bgmac_rx_header *rx;
+	void *buf;

	/* Alloc skb */
-	skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
-	if (!skb)
+	buf = netdev_alloc_frag(BGMAC_RX_ALLOC_SIZE);
+	if (!buf)
		return -ENOMEM;

	/* Poison - if everything goes fine, hardware will overwrite it */
-	rx = (struct bgmac_rx_header *)skb->data;
+	rx = buf;
	rx->len = cpu_to_le16(0xdead);
	rx->flags = cpu_to_le16(0xbeef);

	/* Map skb for the DMA */
-	dma_addr = dma_map_single(dma_dev, skb->data,
-				  BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+	dma_addr = dma_map_single(dma_dev, buf, BGMAC_RX_BUF_SIZE,
+				  DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		bgmac_err(bgmac, "DMA mapping error\n");
-		dev_kfree_skb(skb);
+		put_page(virt_to_head_page(buf));
		return -ENOMEM;
	}

	/* Update the slot */
-	slot->skb = skb;
+	slot->buf = buf;
	slot->dma_addr = dma_addr;

	return 0;
 }

 static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
 {

--- 26 unchanged lines hidden ---

	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	ring->end = end_slot;

	while (ring->start != ring->end) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
-		struct sk_buff *skb = slot->skb;
-		struct bgmac_rx_header *rx;
+		struct bgmac_rx_header *rx = slot->buf;
+		struct sk_buff *skb;
+		void *buf = slot->buf;
		u16 len, flags;

		/* Unmap buffer to make it accessible to the CPU */
		dma_sync_single_for_cpu(dma_dev, slot->dma_addr,
					BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

		/* Get info from the header */
-		rx = (struct bgmac_rx_header *)skb->data;
		len = le16_to_cpu(rx->len);
		flags = le16_to_cpu(rx->flags);

		do {
			dma_addr_t old_dma_addr = slot->dma_addr;
			int err;

			/* Check for poison and drop or pass the packet */

--- 24 unchanged lines hidden ---

				break;
			}
			bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

			/* Unmap old skb, we'll pass it to the netfif */
			dma_unmap_single(dma_dev, old_dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

+			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
+
			skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
-			netif_receive_skb(skb);
+			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;

--- 19 unchanged lines hidden ---

			   0xff0);
		if (bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO))
			return true;
		break;
	}
	return false;
 }
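The RX conversion above replaces per-buffer skb allocation with netdev_alloc_frag() at refill time plus build_skb() only once a frame has actually arrived, then hands the result to GRO via napi_gro_receive(). One caveat worth noting: build_skb() can return NULL under memory pressure, and the hunk above uses its return value unchecked. A kernel-style sketch of the receive step with that check added (the function name is hypothetical; the constants and calls are the driver's):

```c
static void example_rx_one(struct bgmac *bgmac, void *buf, u16 len)
{
	struct sk_buff *skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);

	if (unlikely(!skb)) {	/* the hunk above omits this check */
		put_page(virt_to_head_page(buf));
		return;
	}

	skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
	skb_pull(skb, BGMAC_RX_FRAME_OFFSET);	/* strip the hw rx header */

	skb_checksum_none_assert(skb);
	skb->protocol = eth_type_trans(skb, bgmac->net_dev);
	napi_gro_receive(&bgmac->napi, skb);	/* GRO merge before the stack */
}
```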
-static void bgmac_dma_ring_free(struct bgmac *bgmac,
-				struct bgmac_dma_ring *ring)
+static void bgmac_dma_tx_ring_free(struct bgmac *bgmac,
+				   struct bgmac_dma_ring *ring)
 {
	struct device *dma_dev = bgmac->core->dma_dev;
+	struct bgmac_dma_desc *dma_desc = ring->cpu_base;
	struct bgmac_slot_info *slot;
-	int size;
	int i;

	for (i = 0; i < ring->num_slots; i++) {
+		int len = dma_desc[i].ctl1 & BGMAC_DESC_CTL1_LEN;
+
		slot = &ring->slots[i];
-		if (slot->skb) {
-			if (slot->dma_addr)
-				dma_unmap_single(dma_dev, slot->dma_addr,
-						 slot->skb->len, DMA_TO_DEVICE);
-			dev_kfree_skb(slot->skb);
-		}
-	}
-
-	if (ring->cpu_base) {
-		/* Free ring of descriptors */
-		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
-		dma_free_coherent(dma_dev, size, ring->cpu_base,
-				  ring->dma_base);
+		dev_kfree_skb(slot->skb);
+
+		if (!slot->dma_addr)
+			continue;
+
+		if (slot->skb)
+			dma_unmap_single(dma_dev, slot->dma_addr,
+					 len, DMA_TO_DEVICE);
+		else
+			dma_unmap_page(dma_dev, slot->dma_addr,
+				       len, DMA_TO_DEVICE);
	}
 }

+static void bgmac_dma_rx_ring_free(struct bgmac *bgmac,
+				   struct bgmac_dma_ring *ring)
+{
+	struct device *dma_dev = bgmac->core->dma_dev;
+	struct bgmac_slot_info *slot;
+	int i;
+
+	for (i = 0; i < ring->num_slots; i++) {
+		slot = &ring->slots[i];
+		if (!slot->buf)
+			continue;
+
+		if (slot->dma_addr)
+			dma_unmap_single(dma_dev, slot->dma_addr,
+					 BGMAC_RX_BUF_SIZE,
+					 DMA_FROM_DEVICE);
+		put_page(virt_to_head_page(slot->buf));
+	}
+}
+
+static void bgmac_dma_ring_desc_free(struct bgmac *bgmac,
+				     struct bgmac_dma_ring *ring)
+{
+	struct device *dma_dev = bgmac->core->dma_dev;
+	int size;
+
+	if (!ring->cpu_base)
+		return;
+
+	/* Free ring of descriptors */
+	size = ring->num_slots * sizeof(struct bgmac_dma_desc);
+	dma_free_coherent(dma_dev, size, ring->cpu_base,
+			  ring->dma_base);
+}
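Splitting bgmac_dma_ring_desc_free() out of the buffer teardown makes the asymmetry explicit: TX slots may hold skbs, RX slots hold page fragments, but the descriptor ring itself is one coherent-DMA allocation per ring either way. For context, a sketch of the lifecycle it pairs with; the allocation side is paraphrased from the (hidden) bgmac_dma_alloc() and is an assumption here, not a quote:

```c
	/* Sketch: coherent descriptor-ring lifecycle. The alloc call below
	 * is an assumption about the hidden bgmac_dma_alloc(); free must be
	 * handed back the same size/cpu_base/dma_base triple.
	 */
	int size = ring->num_slots * sizeof(struct bgmac_dma_desc);

	ring->cpu_base = dma_zalloc_coherent(dma_dev, size, &ring->dma_base,
					     GFP_KERNEL);
	if (!ring->cpu_base)
		return -ENOMEM;

	/* ...ring in use by hardware... */

	dma_free_coherent(dma_dev, size, ring->cpu_base, ring->dma_base);
```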
|
 static void bgmac_dma_free(struct bgmac *bgmac)
 {
	int i;

-	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
-		bgmac_dma_ring_free(bgmac, &bgmac->tx_ring[i]);
-	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
-		bgmac_dma_ring_free(bgmac, &bgmac->rx_ring[i]);
+	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
+		bgmac_dma_tx_ring_free(bgmac, &bgmac->tx_ring[i]);
+		bgmac_dma_ring_desc_free(bgmac, &bgmac->tx_ring[i]);
+	}
+	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
+		bgmac_dma_rx_ring_free(bgmac, &bgmac->rx_ring[i]);
+		bgmac_dma_ring_desc_free(bgmac, &bgmac->rx_ring[i]);
+	}
 }

 static int bgmac_dma_alloc(struct bgmac *bgmac)
 {
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };

--- 847 unchanged lines hidden ---

	}

	if (update) {
		bgmac_mac_speed(bgmac);
		phy_print_status(phy_dev);
	}
 }

+static int bgmac_fixed_phy_register(struct bgmac *bgmac)
+{
+	struct fixed_phy_status fphy_status = {
+		.link = 1,
+		.speed = SPEED_1000,
+		.duplex = DUPLEX_FULL,
+	};
+	struct phy_device *phy_dev;
+	int err;
+
+	phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
+	if (!phy_dev || IS_ERR(phy_dev)) {
+		bgmac_err(bgmac, "Failed to register fixed PHY device\n");
+		return -ENODEV;
+	}
+
+	err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link,
+				 PHY_INTERFACE_MODE_MII);
+	if (err) {
+		bgmac_err(bgmac, "Connecting PHY failed\n");
+		return err;
+	}
+
+	bgmac->phy_dev = phy_dev;
+
+	return err;
+}
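BCM4707 and BCM53018 attach the GMAC to an internal switch port with no MDIO-accessible PHY, so the driver fabricates an always-up 1000/full fixed PHY and connects it through the normal phylib path, keeping bgmac_adjust_link() as the single link-state entry point. One possible gap worth checking in the hunk above: if phy_connect_direct() fails, the fixed PHY just registered is not torn down. A sketch of how such a PHY is typically driven afterwards (illustrative only; the open-path details are an assumption, not part of this diff):

```c
/* Illustrative only: with a fixed PHY attached, the usual phylib flow
 * still applies; polling the fake PHY fires adjust_link once and the MAC
 * is configured as if autonegotiation had completed.
 */
static int example_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);

	phy_start(bgmac->phy_dev);	/* start the PHY state machine / polling */
	netif_start_queue(net_dev);
	return 0;
}
```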
|
 static int bgmac_mii_register(struct bgmac *bgmac)
 {
+	struct bcma_chipinfo *ci = &bgmac->core->bus->chipinfo;
	struct mii_bus *mii_bus;
	struct phy_device *phy_dev;
	char bus_id[MII_BUS_ID_SIZE + 3];
	int i, err = 0;

+	if (ci->id == BCMA_CHIP_ID_BCM4707 ||
+	    ci->id == BCMA_CHIP_ID_BCM53018)
+		return bgmac_fixed_phy_register(bgmac);
+
	mii_bus = mdiobus_alloc();
	if (!mii_bus)
		return -ENOMEM;

	mii_bus->name = "bgmac mii bus";
	sprintf(mii_bus->id, "%s-%d-%d", "bgmac", bgmac->core->bus->num,
		bgmac->core->core_unit);
	mii_bus->priv = bgmac;

--- 164 unchanged lines hidden ---

	netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);

	err = bgmac_mii_register(bgmac);
	if (err) {
		bgmac_err(bgmac, "Cannot register MDIO\n");
		goto err_dma_free;
	}

+	net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+	net_dev->hw_features = net_dev->features;
+	net_dev->vlan_features = net_dev->features;
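Advertising NETIF_F_SG is what actually enables the new fragment path (the stack will not hand a driver nonlinear skbs otherwise), and the checksum flags work here because bgmac_dma_tx_add() resolves CHECKSUM_PARTIAL in software via skb_checksum_help(), as seen earlier in this diff. A kernel-style sketch of that fallback pattern (hypothetical function name; skb_checksum_help() returns 0 on success):

```c
static netdev_tx_t example_xmit(struct net_device *dev, struct sk_buff *skb)
{
	/* Hardware cannot checksum, so finish any deferred checksum in
	 * software before mapping the buffers.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb))
		goto drop;

	/* ...map and queue the skb as usual... */
	return NETDEV_TX_OK;

drop:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
```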
|
	err = register_netdev(bgmac->net_dev);
	if (err) {
		bgmac_err(bgmac, "Cannot register net device\n");
		goto err_mii_unregister;
	}

	netif_carrier_off(net_dev);

--- 55 unchanged lines hidden ---