/* This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/if_arp.h>

#include <net/6lowpan.h>
#include <net/mac802154.h>
#include <net/ieee802154_netdev.h>

#include "6lowpan_i.h"

#define LOWPAN_DISPATCH_FIRST           0xc0
#define LOWPAN_DISPATCH_FRAG_MASK       0xf8

#define LOWPAN_DISPATCH_NALP            0x00
#define LOWPAN_DISPATCH_ESC             0x40
#define LOWPAN_DISPATCH_HC1             0x42
#define LOWPAN_DISPATCH_DFF             0x43
#define LOWPAN_DISPATCH_BC0             0x50
#define LOWPAN_DISPATCH_MESH            0x80

static int lowpan_give_skb_to_device(struct sk_buff *skb)
{
        skb->protocol = htons(ETH_P_IPV6);

        return netif_rx(skb);
}

static int lowpan_rx_handlers_result(struct sk_buff *skb, lowpan_rx_result res)
{
        switch (res) {
        case RX_CONTINUE:
                /* nobody cared about this packet */
                net_warn_ratelimited("%s: received unknown dispatch\n",
                                     __func__);

                /* fall-through */
        case RX_DROP_UNUSABLE:
                kfree_skb(skb);

                /* fall-through */
        case RX_DROP:
                return NET_RX_DROP;
        case RX_QUEUED:
                return lowpan_give_skb_to_device(skb);
        default:
                break;
        }

        return NET_RX_DROP;
}

static inline bool lowpan_is_frag1(u8 dispatch)
{
        return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAG1;
}

static inline bool lowpan_is_fragn(u8 dispatch)
{
        return (dispatch & LOWPAN_DISPATCH_FRAG_MASK) == LOWPAN_DISPATCH_FRAGN;
}

static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)
{
        int ret;

        if (!(lowpan_is_frag1(*skb_network_header(skb)) ||
              lowpan_is_fragn(*skb_network_header(skb))))
                return RX_CONTINUE;

        ret = lowpan_frag_rcv(skb, *skb_network_header(skb) &
                              LOWPAN_DISPATCH_FRAG_MASK);
        if (ret == 1)
                return RX_QUEUED;

        /* Packet is freed by lowpan_frag_rcv on error or put into the frag
         * bucket.
         */
        return RX_DROP;
}

int lowpan_iphc_decompress(struct sk_buff *skb)
{
        struct ieee802154_addr_sa sa, da;
        struct ieee802154_hdr hdr;
        u8 iphc0, iphc1;
        void *sap, *dap;

        if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
                return -EINVAL;

        raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);

        if (lowpan_fetch_skb_u8(skb, &iphc0) ||
            lowpan_fetch_skb_u8(skb, &iphc1))
                return -EINVAL;

        ieee802154_addr_to_sa(&sa, &hdr.source);
        ieee802154_addr_to_sa(&da, &hdr.dest);

        if (sa.addr_type == IEEE802154_ADDR_SHORT)
                sap = &sa.short_addr;
        else
                sap = &sa.hwaddr;

        if (da.addr_type == IEEE802154_ADDR_SHORT)
                dap = &da.short_addr;
        else
                dap = &da.hwaddr;

        return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
                                        IEEE802154_ADDR_LEN, dap, da.addr_type,
                                        IEEE802154_ADDR_LEN, iphc0, iphc1);
}

static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
{
        int ret;

        if (!lowpan_is_iphc(*skb_network_header(skb)))
                return RX_CONTINUE;

        /* Setting the datagram size (d_size) to zero indicates non-fragment
         * handling while doing lowpan_header_decompress.
         */
        lowpan_802154_cb(skb)->d_size = 0;

        ret = lowpan_iphc_decompress(skb);
        if (ret < 0)
                return RX_DROP_UNUSABLE;

        return RX_QUEUED;
}

lowpan_rx_result lowpan_rx_h_ipv6(struct sk_buff *skb)
{
        if (!lowpan_is_ipv6(*skb_network_header(skb)))
                return RX_CONTINUE;

        /* Pull off the 1-byte 6LoWPAN dispatch header. */
        skb_pull(skb, 1);
        return RX_QUEUED;
}

static inline bool lowpan_is_esc(u8 dispatch)
{
        return dispatch == LOWPAN_DISPATCH_ESC;
}

static lowpan_rx_result lowpan_rx_h_esc(struct sk_buff *skb)
{
        if (!lowpan_is_esc(*skb_network_header(skb)))
                return RX_CONTINUE;

        net_warn_ratelimited("%s: %s\n", skb->dev->name,
                             "6LoWPAN ESC not supported");

        return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_hc1(u8 dispatch)
{
        return dispatch == LOWPAN_DISPATCH_HC1;
}

static lowpan_rx_result lowpan_rx_h_hc1(struct sk_buff *skb)
{
        if (!lowpan_is_hc1(*skb_network_header(skb)))
                return RX_CONTINUE;

        net_warn_ratelimited("%s: %s\n", skb->dev->name,
                             "6LoWPAN HC1 not supported");

        return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_dff(u8 dispatch)
{
        return dispatch == LOWPAN_DISPATCH_DFF;
}

static lowpan_rx_result lowpan_rx_h_dff(struct sk_buff *skb)
{
        if (!lowpan_is_dff(*skb_network_header(skb)))
                return RX_CONTINUE;

        net_warn_ratelimited("%s: %s\n", skb->dev->name,
                             "6LoWPAN DFF not supported");

        return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_bc0(u8 dispatch)
{
        return dispatch == LOWPAN_DISPATCH_BC0;
}

static lowpan_rx_result lowpan_rx_h_bc0(struct sk_buff *skb)
{
        if (!lowpan_is_bc0(*skb_network_header(skb)))
                return RX_CONTINUE;

        net_warn_ratelimited("%s: %s\n", skb->dev->name,
                             "6LoWPAN BC0 not supported");

        return RX_DROP_UNUSABLE;
}

static inline bool lowpan_is_mesh(u8 dispatch)
{
        return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_MESH;
}

static lowpan_rx_result lowpan_rx_h_mesh(struct sk_buff *skb)
{
        if (!lowpan_is_mesh(*skb_network_header(skb)))
                return RX_CONTINUE;

        net_warn_ratelimited("%s: %s\n", skb->dev->name,
                             "6LoWPAN MESH not supported");

        return RX_DROP_UNUSABLE;
}

static int lowpan_invoke_rx_handlers(struct sk_buff *skb)
{
        lowpan_rx_result res;

#define CALL_RXH(rxh)                   \
        do {                            \
                res = rxh(skb);         \
                if (res != RX_CONTINUE) \
                        goto rxh_next;  \
        } while (0)

        /* likely dispatches first */
        CALL_RXH(lowpan_rx_h_iphc);
        CALL_RXH(lowpan_rx_h_frag);
        CALL_RXH(lowpan_rx_h_ipv6);
        CALL_RXH(lowpan_rx_h_esc);
        CALL_RXH(lowpan_rx_h_hc1);
        CALL_RXH(lowpan_rx_h_dff);
        CALL_RXH(lowpan_rx_h_bc0);
        CALL_RXH(lowpan_rx_h_mesh);

rxh_next:
        return lowpan_rx_handlers_result(skb, res);
#undef CALL_RXH
}

static inline bool lowpan_is_nalp(u8 dispatch)
{
        return (dispatch & LOWPAN_DISPATCH_FIRST) == LOWPAN_DISPATCH_NALP;
}

/* Look up reserved dispatch values at:
 * https://www.iana.org/assignments/_6lowpan-parameters/_6lowpan-parameters.xhtml#_6lowpan-parameters-1
 *
 * Last Updated: 2015-01-22
 */
static inline bool lowpan_is_reserved(u8 dispatch)
{
        return ((dispatch >= 0x44 && dispatch <= 0x4F) ||
                (dispatch >= 0x51 && dispatch <= 0x5F) ||
                (dispatch >= 0xc8 && dispatch <= 0xdf) ||
                (dispatch >= 0xe8 && dispatch <= 0xff));
}

/* lowpan_rx_h_check checks generic 6LoWPAN requirements
 * in the MAC and 6LoWPAN headers.
 *
 * Don't manipulate the skb here, it could be a shared buffer.
 */
static inline bool lowpan_rx_h_check(struct sk_buff *skb)
{
        __le16 fc = ieee802154_get_fc_from_skb(skb);

        /* check for an ieee802154-conformant 6LoWPAN header */
        if (!ieee802154_is_data(fc) ||
            !ieee802154_is_intra_pan(fc))
                return false;

        /* check if we can dereference the dispatch */
        if (unlikely(!skb->len))
                return false;

        if (lowpan_is_nalp(*skb_network_header(skb)) ||
            lowpan_is_reserved(*skb_network_header(skb)))
                return false;

        return true;
}

static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
                      struct packet_type *pt, struct net_device *orig_wdev)
{
        struct net_device *ldev;

        if (wdev->type != ARPHRD_IEEE802154 ||
            skb->pkt_type == PACKET_OTHERHOST ||
            !lowpan_rx_h_check(skb))
                return NET_RX_DROP;

        ldev = wdev->ieee802154_ptr->lowpan_dev;
        if (!ldev || !netif_running(ldev))
                return NET_RX_DROP;

        /* Replacing skb->dev and the rx handlers that follow will manipulate
         * the skb.
         */
        skb = skb_share_check(skb, GFP_ATOMIC);
        if (!skb)
                return NET_RX_DROP;
        skb->dev = ldev;

        /* When receiving FRAG1 it is likely that we manipulate the buffer.
         * When receiving IPHC we manipulate the data buffer. So we need
         * to unshare the buffer.
         */
        if (lowpan_is_frag1(*skb_network_header(skb)) ||
            lowpan_is_iphc(*skb_network_header(skb))) {
                skb = skb_unshare(skb, GFP_ATOMIC);
                if (!skb)
                        return NET_RX_DROP;
        }

        return lowpan_invoke_rx_handlers(skb);
}

static struct packet_type lowpan_packet_type = {
        .type = htons(ETH_P_IEEE802154),
        .func = lowpan_rcv,
};

void lowpan_rx_init(void)
{
        dev_add_pack(&lowpan_packet_type);
}

void lowpan_rx_exit(void)
{
        dev_remove_pack(&lowpan_packet_type);
}