/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 *
 */

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/if_arp.h>
#include <net/sock.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

/* Helper Functions */

/* Infer skb->protocol from the IP version nibble of the payload */
static void rmnet_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}

/* Generic handler */

static void
rmnet_deliver_skb(struct sk_buff *skb)
{
	struct rmnet_priv *priv = netdev_priv(skb->dev);

	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);
	rmnet_vnd_rx_fixup(skb, skb->dev);

	skb->pkt_type = PACKET_HOST;
	skb_set_mac_header(skb, 0);
	gro_cells_receive(&priv->gro_cells, skb);
}

/* MAP handler */

static void
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u16 len, pad;
	u8 mux_id;

	/* Command frames are consumed by the MAP command handler when
	 * enabled; otherwise they are dropped.
	 */
	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->data_format & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		goto free_skb;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	pad = RMNET_MAP_GET_PAD(skb);
	len = RMNET_MAP_GET_LENGTH(skb) - pad;

	if (mux_id >= RMNET_MAX_LOGICAL_EP)
		goto free_skb;

	ep = rmnet_get_endpoint(port, mux_id);
	if (!ep)
		goto free_skb;

	skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	rmnet_set_skb_proto(skb);

	if (port->data_format & RMNET_INGRESS_FORMAT_MAP_CKSUMV4) {
		/* If the downlink checksum trailer validates the packet,
		 * the stack can skip its own verification.
		 */
		if (!rmnet_map_checksum_downlink_packet(skb, len + pad))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Strip MAP padding */
	skb_trim(skb, len);
	rmnet_deliver_skb(skb);
	return;

free_skb:
	kfree_skb(skb);
}

static void
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;

	if (skb->dev->type == ARPHRD_ETHER) {
		/* Runs in the rx softirq, so the allocation must not sleep */
		if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
			kfree_skb(skb);
			return;
		}

		skb_push(skb, ETH_HLEN);
	}

	if (port->data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
		/* Split the aggregated frame into individual MAP packets */
		while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);

		consume_skb(skb);
	} else {
		__rmnet_map_ingress_handler(skb, port);
	}
}

static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port, u8 mux_id,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_len;
	struct rmnet_map_header *map_header;

	additional_header_len = 0;
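	/* Every egress frame needs headroom for a MAP header; checksum
	 * offload, when enabled, adds an uplink checksum header as well.
	 */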
	required_headroom = sizeof(struct rmnet_map_header);

	if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4) {
		additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
		required_headroom += additional_header_len;
	}

	if (skb_headroom(skb) < required_headroom) {
		/* The transmit path can run in atomic context, so the
		 * allocation must not sleep.
		 */
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC))
			goto fail;
	}

	if (port->data_format & RMNET_EGRESS_FORMAT_MAP_CKSUMV4)
		rmnet_map_checksum_uplink_packet(skb, orig_dev);

	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
	if (!map_header)
		goto fail;

	map_header->mux_id = mux_id;

	skb->protocol = htons(ETH_P_MAP);

	return 0;

fail:
	kfree_skb(skb);
	return -ENOMEM;
}

static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
	if (bridge_dev) {
		skb->dev = bridge_dev;
		dev_queue_xmit(skb);
	}
}

/* Ingress / Egress Entry Points */

/* Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct rmnet_port *port;
	struct net_device *dev;

	if (!skb)
		goto done;

	dev = skb->dev;
	port = rmnet_get_port(dev);

	switch (port->rmnet_mode) {
	case RMNET_EPMODE_VND:
		rmnet_map_ingress_handler(skb, port);
		break;
	case RMNET_EPMODE_BRIDGE:
		rmnet_bridge_handler(skb, port->bridge_ep);
		break;
	}

done:
	return RX_HANDLER_CONSUMED;
}

/* Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb)
{
	struct net_device *orig_dev;
	struct rmnet_port *port;
	struct rmnet_priv *priv;
	u8 mux_id;

	/* Lower the pacing shift so TCP small queues allow enough
	 * in-flight data for the device to aggregate frames.
	 */
	sk_pacing_shift_update(skb->sk, 8);

	orig_dev = skb->dev;
	priv = netdev_priv(orig_dev);
	skb->dev = priv->real_dev;
	mux_id = priv->mux_id;

	port = rmnet_get_port(skb->dev);
	if (!port) {
		kfree_skb(skb);
		return;
	}

	if (rmnet_map_egress_handler(skb, port, mux_id, orig_dev))
		return;

	rmnet_vnd_tx_fixup(skb, orig_dev);

	dev_queue_xmit(skb);
}