/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * RMNET Data ingress/egress handler
 *
 */

#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include "rmnet_private.h"
#include "rmnet_config.h"
#include "rmnet_vnd.h"
#include "rmnet_map.h"
#include "rmnet_handlers.h"

#define RMNET_IP_VERSION_4 0x40
#define RMNET_IP_VERSION_6 0x60

/* Helper Functions */

static void rmnet_set_skb_proto(struct sk_buff *skb)
{
	switch (skb->data[0] & 0xF0) {
	case RMNET_IP_VERSION_4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case RMNET_IP_VERSION_6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	default:
		skb->protocol = htons(ETH_P_MAP);
		break;
	}
}

/* Generic handler */

static rx_handler_result_t
rmnet_bridge_handler(struct sk_buff *skb, struct rmnet_endpoint *ep)
{
	if (!ep->egress_dev)
		kfree_skb(skb);
	else
		rmnet_egress_handler(skb, ep);

	return RX_HANDLER_CONSUMED;
}

static rx_handler_result_t
rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_endpoint *ep)
{
	switch (ep->rmnet_mode) {
	case RMNET_EPMODE_NONE:
		return RX_HANDLER_PASS;

	case RMNET_EPMODE_BRIDGE:
		return rmnet_bridge_handler(skb, ep);

	case RMNET_EPMODE_VND:
		skb_reset_transport_header(skb);
		skb_reset_network_header(skb);
		rmnet_vnd_rx_fixup(skb, skb->dev);

		skb->pkt_type = PACKET_HOST;
		skb_set_mac_header(skb, 0);
		netif_receive_skb(skb);
		return RX_HANDLER_CONSUMED;

	default:
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}
}

static rx_handler_result_t
rmnet_ingress_deliver_packet(struct sk_buff *skb,
			     struct rmnet_port *port)
{
	if (!port) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	skb->dev = port->local_ep.egress_dev;

	return rmnet_deliver_skb(skb, &port->local_ep);
}
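/* MAP framing note: every MAP data frame handled below begins with a
 * struct rmnet_map_header (defined in rmnet_map.h), which the handlers
 * read through the RMNET_MAP_GET_* accessors. A sketch of the assumed
 * layout, for the usual MAP v1 format on little-endian builds:
 *
 *	struct rmnet_map_header {
 *		u8  pad_len:6;		// trailing padding bytes
 *		u8  reserved_bit:1;
 *		u8  cd_bit:1;		// 1 = command frame, 0 = data
 *		u8  mux_id;		// logical endpoint selector
 *		__be16 pkt_len;		// payload length incl. padding
 *	};
 */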
/* MAP handler */

static rx_handler_result_t
__rmnet_map_ingress_handler(struct sk_buff *skb,
			    struct rmnet_port *port)
{
	struct rmnet_endpoint *ep;
	u8 mux_id;
	u16 len;

	/* Drop runt frames that cannot hold a MAP header before the
	 * RMNET_MAP_GET_* accessors below dereference it.
	 */
	if (skb->len < sizeof(struct rmnet_map_header)) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	if (RMNET_MAP_GET_CD_BIT(skb)) {
		if (port->ingress_data_format
		    & RMNET_INGRESS_FORMAT_MAP_COMMANDS)
			return rmnet_map_command(skb, port);

		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	mux_id = RMNET_MAP_GET_MUX_ID(skb);
	len = RMNET_MAP_GET_LENGTH(skb) - RMNET_MAP_GET_PAD(skb);

	if (mux_id >= RMNET_MAX_LOGICAL_EP) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	ep = &port->muxed_ep[mux_id];

	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEMUXING)
		skb->dev = ep->egress_dev;

	/* Subtract MAP header */
	skb_pull(skb, sizeof(struct rmnet_map_header));
	skb_trim(skb, len);
	rmnet_set_skb_proto(skb);
	return rmnet_deliver_skb(skb, ep);
}

static rx_handler_result_t
rmnet_map_ingress_handler(struct sk_buff *skb,
			  struct rmnet_port *port)
{
	struct sk_buff *skbn;
	int rc;

	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_DEAGGREGATION) {
		while ((skbn = rmnet_map_deaggregate(skb)) != NULL)
			__rmnet_map_ingress_handler(skbn, port);

		consume_skb(skb);
		rc = RX_HANDLER_CONSUMED;
	} else {
		rc = __rmnet_map_ingress_handler(skb, port);
	}

	return rc;
}

static int rmnet_map_egress_handler(struct sk_buff *skb,
				    struct rmnet_port *port,
				    struct rmnet_endpoint *ep,
				    struct net_device *orig_dev)
{
	int required_headroom, additional_header_len;
	struct rmnet_map_header *map_header;

	additional_header_len = 0;
	required_headroom = sizeof(struct rmnet_map_header);

	if (skb_headroom(skb) < required_headroom) {
		/* This runs in the transmit path, which may be in atomic
		 * (softirq) context, so a sleeping GFP_KERNEL allocation
		 * is not safe here; use GFP_ATOMIC. Free the skb on
		 * failure so RMNET_MAP_CONSUMED really means consumed.
		 */
		if (pskb_expand_head(skb, required_headroom, 0, GFP_ATOMIC)) {
			kfree_skb(skb);
			return RMNET_MAP_CONSUMED;
		}
	}

	map_header = rmnet_map_add_map_header(skb, additional_header_len, 0);
	if (!map_header) {
		kfree_skb(skb);
		return RMNET_MAP_CONSUMED;
	}

	if (port->egress_data_format & RMNET_EGRESS_FORMAT_MUXING) {
		if (ep->mux_id == 0xff)
			map_header->mux_id = 0;
		else
			map_header->mux_id = ep->mux_id;
	}

	skb->protocol = htons(ETH_P_MAP);

	return RMNET_MAP_SUCCESS;
}

/* Ingress / Egress Entry Points */

/* Processes packet as per ingress data format for receiving device. Logical
 * endpoint is determined from packet inspection. Packet is then sent to the
 * egress device listed in the logical endpoint configuration.
 */
rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
{
	struct rmnet_port *port;
	struct sk_buff *skb = *pskb;
	struct net_device *dev;
	int rc;

	if (!skb)
		return RX_HANDLER_CONSUMED;

	dev = skb->dev;
	port = rmnet_get_port(dev);

	/* rmnet_get_port() returns NULL if the device is not an rmnet
	 * real device; drop the packet instead of dereferencing NULL.
	 */
	if (!port) {
		kfree_skb(skb);
		return RX_HANDLER_CONSUMED;
	}

	if (port->ingress_data_format & RMNET_INGRESS_FORMAT_MAP) {
		rc = rmnet_map_ingress_handler(skb, port);
	} else {
		switch (ntohs(skb->protocol)) {
		case ETH_P_MAP:
			if (port->local_ep.rmnet_mode ==
			    RMNET_EPMODE_BRIDGE) {
				rc = rmnet_ingress_deliver_packet(skb, port);
			} else {
				kfree_skb(skb);
				rc = RX_HANDLER_CONSUMED;
			}
			break;

		case ETH_P_IP:
		case ETH_P_IPV6:
			rc = rmnet_ingress_deliver_packet(skb, port);
			break;

		default:
			rc = RX_HANDLER_PASS;
		}
	}

	return rc;
}

/* Modifies packet as per logical endpoint configuration and egress data format
 * for egress device configured in logical endpoint. Packet is then transmitted
 * on the egress device.
 */
void rmnet_egress_handler(struct sk_buff *skb,
			  struct rmnet_endpoint *ep)
{
	struct net_device *orig_dev;
	struct rmnet_port *port;

	orig_dev = skb->dev;
	skb->dev = ep->egress_dev;

	port = rmnet_get_port(skb->dev);
	if (!port) {
		kfree_skb(skb);
		return;
	}

	if (port->egress_data_format & RMNET_EGRESS_FORMAT_MAP) {
		switch (rmnet_map_egress_handler(skb, port, ep, orig_dev)) {
		case RMNET_MAP_CONSUMED:
			return;

		case RMNET_MAP_SUCCESS:
			break;

		default:
			kfree_skb(skb);
			return;
		}
	}

	if (ep->rmnet_mode == RMNET_EPMODE_VND)
		rmnet_vnd_tx_fixup(skb, orig_dev);

	dev_queue_xmit(skb);
}
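/* Usage sketch: rmnet_rx_handler() is meant to be attached to the
 * physical (real) device as its rx_handler; assuming the registration
 * site in rmnet_config.c, that looks like:
 *
 *	rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
 *
 * rmnet_egress_handler() is invoked from the virtual device's
 * ndo_start_xmit (rmnet_vnd.c), which runs in atomic context; that is
 * why the MAP egress path above avoids sleeping allocations.
 */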