/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <net/6lowpan.h>
#include <net/ieee802154_netdev.h>

#include "6lowpan_i.h"

/* don't save pan id, it's intra pan */
struct lowpan_addr {
	u8 mode;
	union {
		/* IPv6 needs big endian here */
		__be64 extended_addr;
		__be16 short_addr;
	} u;
};

struct lowpan_addr_info {
	struct lowpan_addr daddr;
	struct lowpan_addr saddr;
};

/* address info is stashed in the skb headroom by lowpan_header_create()
 * and read back later by lowpan_header()
 */
static inline struct
lowpan_addr_info *lowpan_skb_priv(const struct sk_buff *skb)
{
	WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct lowpan_addr_info));
	return (struct lowpan_addr_info *)(skb->data -
					   sizeof(struct lowpan_addr_info));
}

int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev,
			 unsigned short type, const void *_daddr,
			 const void *_saddr, unsigned int len)
{
	const u8 *saddr = _saddr;
	const u8 *daddr = _daddr;
	struct lowpan_addr_info *info;

	/* TODO:
	 * if this packet isn't an IPv6 one, where should it be routed?
	 */
	if (type != ETH_P_IPV6)
		return 0;

	if (!saddr)
		saddr = ldev->dev_addr;

	raw_dump_inline(__func__, "saddr", (unsigned char *)saddr, 8);
	raw_dump_inline(__func__, "daddr", (unsigned char *)daddr, 8);

	info = lowpan_skb_priv(skb);

	/* TODO: Currently we only support extended_addr */
	info->daddr.mode = IEEE802154_ADDR_LONG;
	memcpy(&info->daddr.u.extended_addr, daddr,
	       sizeof(info->daddr.u.extended_addr));
	info->saddr.mode = IEEE802154_ADDR_LONG;
	memcpy(&info->saddr.u.extended_addr, saddr,
	       sizeof(info->saddr.u.extended_addr));

	return 0;
}

/* allocate an skb for one fragment on the wpan device and write the
 * 802.15.4 MAC header copied from @master_hdr
 */
static struct sk_buff*
lowpan_alloc_frag(struct sk_buff *skb, int size,
		  const struct ieee802154_hdr *master_hdr)
{
	struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev;
	struct sk_buff *frag;
	int rc;

	frag = alloc_skb(wdev->hard_header_len + wdev->needed_tailroom + size,
			 GFP_ATOMIC);

	if (likely(frag)) {
		frag->dev = wdev;
		frag->priority = skb->priority;
		skb_reserve(frag, wdev->hard_header_len);
		skb_reset_network_header(frag);
		*mac_cb(frag) = *mac_cb(skb);

		rc = dev_hard_header(frag, wdev, 0, &master_hdr->dest,
				     &master_hdr->source, size);
		if (rc < 0) {
			kfree_skb(frag);
			return ERR_PTR(rc);
		}
	} else {
		frag = ERR_PTR(-ENOMEM);
	}

	return frag;
}

/* build one fragment: fragment header plus @len payload bytes starting at
 * @offset of the compressed datagram, then queue it for transmission
 */
static int
lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr,
		     u8 *frag_hdr, int frag_hdrlen,
		     int offset, int len)
{
	struct sk_buff *frag;

	raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen);

	frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr);
	if (IS_ERR(frag))
		return PTR_ERR(frag);

	memcpy(skb_put(frag, frag_hdrlen), frag_hdr, frag_hdrlen);
	memcpy(skb_put(frag, len), skb_network_header(skb) + offset, len);

	raw_dump_table(__func__, " fragment dump", frag->data, frag->len);

	return dev_queue_xmit(frag);
}

/* split the compressed datagram into FRAG1/FRAGN fragments and send them;
 * consumes @skb on success, frees it on error
 */
static int
lowpan_xmit_fragmented(struct sk_buff *skb, struct net_device *ldev,
		       const struct ieee802154_hdr *wpan_hdr, u16 dgram_size,
		       u16 dgram_offset)
{
	__be16 frag_tag;
	u8 frag_hdr[5];
	int frag_cap, frag_len, payload_cap, rc;
	int skb_unprocessed, skb_offset;

	frag_tag = htons(lowpan_dev_info(ldev)->fragment_tag);
	lowpan_dev_info(ldev)->fragment_tag++;

	frag_hdr[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x07);
	frag_hdr[1] = dgram_size & 0xff;
	memcpy(frag_hdr + 2, &frag_tag, sizeof(frag_tag));

	payload_cap = ieee802154_max_payload(wpan_hdr);

	frag_len = round_down(payload_cap - LOWPAN_FRAG1_HEAD_SIZE -
			      skb_network_header_len(skb), 8);

	skb_offset = skb_network_header_len(skb);
	skb_unprocessed = skb->len - skb->mac_len - skb_offset;

	rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
				  LOWPAN_FRAG1_HEAD_SIZE, 0,
				  frag_len + skb_network_header_len(skb));
	if (rc) {
		pr_debug("%s unable to send FRAG1 packet (tag: %d)\n",
			 __func__, ntohs(frag_tag));
		goto err;
	}

	frag_hdr[0] &= ~LOWPAN_DISPATCH_FRAG1;
	frag_hdr[0] |= LOWPAN_DISPATCH_FRAGN;
	frag_cap = round_down(payload_cap - LOWPAN_FRAGN_HEAD_SIZE, 8);

	do {
		dgram_offset += frag_len;
		skb_offset += frag_len;
		skb_unprocessed -= frag_len;
		frag_len = min(frag_cap, skb_unprocessed);

		frag_hdr[4] = dgram_offset >> 3;

		rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr,
					  LOWPAN_FRAGN_HEAD_SIZE, skb_offset,
					  frag_len);
		if (rc) {
			pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
				 __func__, ntohs(frag_tag), skb_offset);
			goto err;
		}
	} while (skb_unprocessed > frag_cap);

	consume_skb(skb);
	return NET_XMIT_SUCCESS;

err:
	kfree_skb(skb);
	return rc;
}

/* compress the IPv6 header and prepend the 802.15.4 MAC header; reports the
 * original datagram size and the offset needed for fragmentation
 */
static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
			 u16 *dgram_size, u16 *dgram_offset)
{
	struct wpan_dev *wpan_dev = lowpan_dev_info(ldev)->wdev->ieee802154_ptr;
	struct ieee802154_addr sa, da;
	struct ieee802154_mac_cb *cb = mac_cb_init(skb);
	struct lowpan_addr_info info;
	void *daddr, *saddr;

	memcpy(&info, lowpan_skb_priv(skb), sizeof(info));

	/* TODO: Currently we only support extended_addr */
	daddr = &info.daddr.u.extended_addr;
	saddr = &info.saddr.u.extended_addr;

	*dgram_size = skb->len;
	lowpan_header_compress(skb, ldev, ETH_P_IPV6, daddr, saddr, skb->len);
	/* dgram_offset = (saved bytes after compression) + lowpan header len */
	*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);

	cb->type = IEEE802154_FC_TYPE_DATA;

	/* prepare wpan address data */
	sa.mode = IEEE802154_ADDR_LONG;
	sa.pan_id = wpan_dev->pan_id;
	sa.extended_addr = ieee802154_devaddr_from_raw(saddr);

	/* intra-PAN communications */
	da.pan_id = sa.pan_id;

	/* if the destination address is the broadcast address, use the
	 * corresponding short address
	 */
	if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
		da.mode = IEEE802154_ADDR_SHORT;
		da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
		cb->ackreq = false;
	} else {
		da.mode = IEEE802154_ADDR_LONG;
		da.extended_addr = ieee802154_devaddr_from_raw(daddr);
		cb->ackreq = wpan_dev->ackreq;
	}

	return dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, ETH_P_IPV6,
			       (void *)&da, (void *)&sa, 0);
}

netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev)
{
	struct ieee802154_hdr wpan_hdr;
	int max_single, ret;
	u16 dgram_size, dgram_offset;

	pr_debug("packet xmit\n");

	/* We must take a copy of the skb before we modify/replace the ipv6
	 * header as the header could be used elsewhere
	 */
	skb = skb_unshare(skb, GFP_ATOMIC);
	if (!skb)
		return NET_XMIT_DROP;

	ret = lowpan_header(skb, ldev, &dgram_size, &dgram_offset);
	if (ret < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	if (ieee802154_hdr_peek(skb, &wpan_hdr) < 0) {
		kfree_skb(skb);
		return NET_XMIT_DROP;
	}

	max_single = ieee802154_max_payload(&wpan_hdr);

	if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) {
		/* frame fits into a single 802.15.4 payload, no fragmentation */
		skb->dev = lowpan_dev_info(ldev)->wdev;
		return dev_queue_xmit(skb);
	} else {
		netdev_tx_t rc;

		pr_debug("frame is too big, fragmentation is needed\n");
		rc = lowpan_xmit_fragmented(skb, ldev, &wpan_hdr, dgram_size,
					    dgram_offset);

		return rc < 0 ? NET_XMIT_DROP : rc;
	}
}