// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include <linux/wwan.h>
#include <net/pkt_sched.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_wwan.h"

#define IOSM_IP_TYPE_MASK 0xF0
#define IOSM_IP_TYPE_IPV4 0x40
#define IOSM_IP_TYPE_IPV6 0x60

#define IOSM_IF_ID_PAYLOAD 2

/**
 * struct iosm_netdev_priv - netdev WWAN driver specific private data
 * @ipc_wwan: Pointer to iosm_wwan struct
 * @netdev: Pointer to network interface device structure
 * @if_id: Interface id for device.
 * @ch_id: IPC channel number for which interface device is created.
 */
struct iosm_netdev_priv {
	struct iosm_wwan *ipc_wwan;
	struct net_device *netdev;
	int if_id;
	int ch_id;
};

/**
 * struct iosm_wwan - This structure contains information about WWAN root device
 *		      and interface to the IPC layer.
 * @ipc_imem: Pointer to imem data-struct
 * @sub_netlist: List of active netdevs
 * @dev: Pointer to device structure
 * @if_mutex: Mutex used for add and remove interface id
 */
struct iosm_wwan {
	struct iosm_imem *ipc_imem;
	struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
	struct device *dev;
	struct mutex if_mutex; /* Mutex used for add and remove interface id */
};

/* Bring-up the wwan net link */
static int ipc_wwan_link_open(struct net_device *netdev)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
	struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
	int if_id = priv->if_id;
	int ret;

	if (if_id < IP_MUX_SESSION_START ||
	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
		return -EINVAL;

	mutex_lock(&ipc_wwan->if_mutex);

	/* get channel id */
	priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);

	if (priv->ch_id < 0) {
		dev_err(ipc_wwan->dev,
			"cannot connect wwan0 & id %d to the IPC mem layer",
			if_id);
		ret = -ENODEV;
		goto out;
	}

	/* enable tx path, DL data may follow */
	netif_start_queue(netdev);

	dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
		priv->ch_id, priv->if_id);

	ret = 0;
out:
	mutex_unlock(&ipc_wwan->if_mutex);
	return ret;
}

/* Bring-down the wwan net link */
static int ipc_wwan_link_stop(struct net_device *netdev)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);

	netif_stop_queue(netdev);

	mutex_lock(&priv->ipc_wwan->if_mutex);
	ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
				priv->ch_id);
	priv->ch_id = -1;
	mutex_unlock(&priv->ipc_wwan->if_mutex);

	return 0;
}

/* Transmit a packet */
static int ipc_wwan_link_transmit(struct sk_buff *skb,
				  struct net_device *netdev)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
	struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
	unsigned int len = skb->len;
	int if_id = priv->if_id;
	int ret;

	/* Interface IDs from 1 to 8 are for IP data
	 * & from 257 to 261 are for non-IP data
	 */
	if (if_id < IP_MUX_SESSION_START ||
	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
		return -EINVAL;

	/* Send the SKB to device for transmission */
	ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
					 if_id, priv->ch_id, skb);

	/* Return code of zero is success */
	if (ret == 0) {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += len;
		ret = NETDEV_TX_OK;
	} else if (ret == -EBUSY) {
		ret = NETDEV_TX_BUSY;
		dev_err(ipc_wwan->dev, "unable to push packets");
	} else {
		goto exit;
	}

	return ret;

exit:
	/* Log any skb drop */
	if (if_id)
		dev_dbg(ipc_wwan->dev, "skb dropped. IF_ID: %d, ret: %d", if_id,
			ret);

	dev_kfree_skb_any(skb);
	netdev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}

/* Ops structure for wwan net link */
static const struct net_device_ops ipc_inm_ops = {
	.ndo_open = ipc_wwan_link_open,
	.ndo_stop = ipc_wwan_link_stop,
	.ndo_start_xmit = ipc_wwan_link_transmit,
};

/* Setup function for creating new net link */
static void ipc_wwan_setup(struct net_device *iosm_dev)
{
	iosm_dev->header_ops = NULL;
	iosm_dev->hard_header_len = 0;
	iosm_dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;

	iosm_dev->type = ARPHRD_NONE;
	iosm_dev->mtu = ETH_DATA_LEN;
	iosm_dev->min_mtu = ETH_MIN_MTU;
	iosm_dev->max_mtu = ETH_MAX_MTU;

	iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;

	iosm_dev->netdev_ops = &ipc_inm_ops;
}

/* Create new wwan net link */
static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
			    u32 if_id, struct netlink_ext_ack *extack)
{
	struct iosm_wwan *ipc_wwan = ctxt;
	struct iosm_netdev_priv *priv;
	int err;

	if (if_id < IP_MUX_SESSION_START ||
	    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
		return -EINVAL;

	priv = wwan_netdev_drvpriv(dev);
	priv->if_id = if_id;
	priv->netdev = dev;
	priv->ipc_wwan = ipc_wwan;

	mutex_lock(&ipc_wwan->if_mutex);
	if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id])) {
		err = -EBUSY;
		goto out_unlock;
	}

	err = register_netdevice(dev);
	if (err)
		goto out_unlock;

	rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
	mutex_unlock(&ipc_wwan->if_mutex);

	netif_device_attach(dev);

	return 0;

out_unlock:
	mutex_unlock(&ipc_wwan->if_mutex);
	return err;
}

/* Remove a wwan net link */
static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
			     struct list_head *head)
{
	struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(dev);
	struct iosm_wwan *ipc_wwan = ctxt;
	int if_id = priv->if_id;

	if (WARN_ON(if_id < IP_MUX_SESSION_START ||
		    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
		return;

	mutex_lock(&ipc_wwan->if_mutex);

	if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
		goto unlock;

	RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
	/* unregistering includes synchronize_net() */
	unregister_netdevice_queue(dev, head);

unlock:
	mutex_unlock(&ipc_wwan->if_mutex);
}

static const struct wwan_ops iosm_wwan_ops = {
	.priv_size = sizeof(struct iosm_netdev_priv),
	.setup = ipc_wwan_setup,
	.newlink = ipc_wwan_newlink,
	.dellink = ipc_wwan_dellink,
};

/* Pass a downlink packet from the IPC layer up the network stack */
int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
		     bool dss, int if_id)
{
	struct sk_buff *skb = skb_arg;
	struct net_device_stats *stats;
	struct iosm_netdev_priv *priv;
	int ret;

	if ((skb->data[0] & IOSM_IP_TYPE_MASK) == IOSM_IP_TYPE_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else if ((skb->data[0] & IOSM_IP_TYPE_MASK) ==
		 IOSM_IP_TYPE_IPV6)
		skb->protocol = htons(ETH_P_IPV6);

	skb->pkt_type = PACKET_HOST;

	if (if_id < IP_MUX_SESSION_START ||
	    if_id > IP_MUX_SESSION_END) {
		ret = -EINVAL;
		goto free;
	}

	rcu_read_lock();
	priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
	if (!priv) {
		ret = -EINVAL;
		goto unlock;
	}
	skb->dev = priv->netdev;
	stats = &priv->netdev->stats;
	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	ret = netif_rx(skb);
	skb = NULL;
unlock:
	rcu_read_unlock();
free:
	dev_kfree_skb(skb);
	return ret;
}

/* Enable or disable the TX queue of the netdev bound to if_id */
void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int if_id, bool on)
{
	struct net_device *netdev;
	struct iosm_netdev_priv *priv;
	bool is_tx_blk;

	rcu_read_lock();
	priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
	if (!priv) {
		rcu_read_unlock();
		return;
	}

	netdev = priv->netdev;

	is_tx_blk = netif_queue_stopped(netdev);

	if (on)
		dev_dbg(ipc_wwan->dev, "session id[%d]: flowctrl enable",
			if_id);

	if (on && !is_tx_blk)
		netif_stop_queue(netdev);
	else if (!on && is_tx_blk)
		netif_wake_queue(netdev);
	rcu_read_unlock();
}

/* Allocate the WWAN instance and register the wwan ops with the WWAN core */
struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
{
	struct iosm_wwan *ipc_wwan;

	ipc_wwan = kzalloc(sizeof(*ipc_wwan), GFP_KERNEL);
	if (!ipc_wwan)
		return NULL;

	ipc_wwan->dev = dev;
	ipc_wwan->ipc_imem = ipc_imem;

	/* if_mutex must be usable before wwan_register_ops() creates the
	 * default netdev, which takes it in ipc_wwan_newlink()
	 */
	mutex_init(&ipc_wwan->if_mutex);

	/* WWAN core will create a netdev for the default IP MUX channel */
	if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
			      IP_MUX_SESSION_DEFAULT)) {
		mutex_destroy(&ipc_wwan->if_mutex);
		kfree(ipc_wwan);
		return NULL;
	}

	return ipc_wwan;
}

/* Unregister the wwan ops and free the WWAN instance */
void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
{
	/* This call will remove all child netdev(s) */
	wwan_unregister_ops(ipc_wwan->dev);

	mutex_destroy(&ipc_wwan->if_mutex);

	kfree(ipc_wwan);
}