// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */

#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/if_link.h>
#include <linux/rtnetlink.h>
#include <linux/wwan.h>

#include "iosm_ipc_chnl_cfg.h"
#include "iosm_ipc_imem_ops.h"
#include "iosm_ipc_wwan.h"

#define IOSM_IP_TYPE_MASK 0xF0
#define IOSM_IP_TYPE_IPV4 0x40
#define IOSM_IP_TYPE_IPV6 0x60

#define IOSM_IF_ID_PAYLOAD 2

/**
 * struct iosm_netdev_priv - netdev WWAN driver specific private data
 * @ipc_wwan:	Pointer to iosm_wwan struct
 * @netdev:	Pointer to network interface device structure
 * @if_id:	Interface id for device.
 * @ch_id:	IPC channel number for which interface device is created.
 */
struct iosm_netdev_priv {
        struct iosm_wwan *ipc_wwan;
        struct net_device *netdev;
        int if_id;
        int ch_id;
};

/**
 * struct iosm_wwan - This structure contains information about WWAN root device
 *		      and interface to the IPC layer.
 * @ipc_imem:	Pointer to imem data-struct
 * @sub_netlist: List of active netdevs
 * @dev:	Pointer to the device structure
 * @if_mutex:	Mutex used for add and remove interface id
 */
struct iosm_wwan {
        struct iosm_imem *ipc_imem;
        struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1];
        struct device *dev;
        struct mutex if_mutex; /* Mutex used for add and remove interface id */
};

/* Bring-up the wwan net link */
static int ipc_wwan_link_open(struct net_device *netdev)
{
        struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
        struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
        int if_id = priv->if_id;
        int ret;

        if (if_id < IP_MUX_SESSION_START ||
            if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
                return -EINVAL;

        mutex_lock(&ipc_wwan->if_mutex);

        /* get channel id */
        priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id);

        if (priv->ch_id < 0) {
                dev_err(ipc_wwan->dev,
                        "cannot connect wwan0 & id %d to the IPC mem layer",
                        if_id);
                ret = -ENODEV;
                goto out;
        }

        /* enable tx path, DL data may follow */
        netif_start_queue(netdev);

        dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d",
                priv->ch_id, priv->if_id);

        ret = 0;
out:
        mutex_unlock(&ipc_wwan->if_mutex);
        return ret;
}

/* Bring-down the wwan net link */
static int ipc_wwan_link_stop(struct net_device *netdev)
{
        struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);

        netif_stop_queue(netdev);

        mutex_lock(&priv->ipc_wwan->if_mutex);
        ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id,
                                priv->ch_id);
        priv->ch_id = -1;
        mutex_unlock(&priv->ipc_wwan->if_mutex);

        return 0;
}
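
/* TX path note (editorial addition for clarity): per the ndo_start_xmit()
 * contract, NETDEV_TX_BUSY tells the core to requeue the skb, so the skb
 * must not be freed in that case, while a drop requires the driver to free
 * the skb itself and still report NETDEV_TX_OK. The transmit handler below
 * maps the IPC layer return codes accordingly:
 *
 *	0      -> NETDEV_TX_OK   (queued; counted in tx_packets/tx_bytes)
 *	-EBUSY -> NETDEV_TX_BUSY (skb kept; core retries later)
 *	other  -> skb freed, tx_dropped++, NETDEV_TX_OK returned
 */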

/* Transmit a packet */
static int ipc_wwan_link_transmit(struct sk_buff *skb,
                                  struct net_device *netdev)
{
        struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(netdev);
        struct iosm_wwan *ipc_wwan = priv->ipc_wwan;
        unsigned int len = skb->len;
        int if_id = priv->if_id;
        int ret;

        /* Interface IDs from 1 to 8 are for IP data
         * & from 257 to 261 are for non-IP data
         */
        if (if_id < IP_MUX_SESSION_START ||
            if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
                return -EINVAL;

        /* Send the SKB to device for transmission */
        ret = ipc_imem_sys_wwan_transmit(ipc_wwan->ipc_imem,
                                         if_id, priv->ch_id, skb);

        /* Return code of zero is success */
        if (ret == 0) {
                netdev->stats.tx_packets++;
                netdev->stats.tx_bytes += len;
                ret = NETDEV_TX_OK;
        } else if (ret == -EBUSY) {
                ret = NETDEV_TX_BUSY;
                dev_err(ipc_wwan->dev, "unable to push packets");
        } else {
                goto exit;
        }

        return ret;

exit:
        /* Log any skb drop */
        if (if_id)
                dev_dbg(ipc_wwan->dev, "skb dropped. IF_ID: %d, ret: %d", if_id,
                        ret);

        dev_kfree_skb_any(skb);
        netdev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}

/* Ops structure for wwan net link */
static const struct net_device_ops ipc_inm_ops = {
        .ndo_open = ipc_wwan_link_open,
        .ndo_stop = ipc_wwan_link_stop,
        .ndo_start_xmit = ipc_wwan_link_transmit,
};

/* Setup function for creating new net link */
static void ipc_wwan_setup(struct net_device *iosm_dev)
{
        iosm_dev->header_ops = NULL;
        iosm_dev->hard_header_len = 0;
        iosm_dev->priv_flags |= IFF_NO_QUEUE;

        iosm_dev->type = ARPHRD_NONE;
        iosm_dev->mtu = ETH_DATA_LEN;
        iosm_dev->min_mtu = ETH_MIN_MTU;
        iosm_dev->max_mtu = ETH_MAX_MTU;

        iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP;

        iosm_dev->netdev_ops = &ipc_inm_ops;
}

/* Create new wwan net link */
static int ipc_wwan_newlink(void *ctxt, struct net_device *dev,
                            u32 if_id, struct netlink_ext_ack *extack)
{
        struct iosm_wwan *ipc_wwan = ctxt;
        struct iosm_netdev_priv *priv;
        int err;

        if (if_id < IP_MUX_SESSION_START ||
            if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))
                return -EINVAL;

        priv = wwan_netdev_drvpriv(dev);
        priv->if_id = if_id;
        priv->netdev = dev;
        priv->ipc_wwan = ipc_wwan;

        mutex_lock(&ipc_wwan->if_mutex);
        if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id])) {
                err = -EBUSY;
                goto out_unlock;
        }

        err = register_netdevice(dev);
        if (err)
                goto out_unlock;

        rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv);
        mutex_unlock(&ipc_wwan->if_mutex);

        netif_device_attach(dev);

        return 0;

out_unlock:
        mutex_unlock(&ipc_wwan->if_mutex);
        return err;
}

static void ipc_wwan_dellink(void *ctxt, struct net_device *dev,
                             struct list_head *head)
{
        struct iosm_netdev_priv *priv = wwan_netdev_drvpriv(dev);
        struct iosm_wwan *ipc_wwan = ctxt;
        int if_id = priv->if_id;

        if (WARN_ON(if_id < IP_MUX_SESSION_START ||
                    if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)))
                return;

        mutex_lock(&ipc_wwan->if_mutex);

        if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv))
                goto unlock;

        RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL);
        /* unregistering includes synchronize_net() */
        unregister_netdevice_queue(dev, head);

unlock:
        mutex_unlock(&ipc_wwan->if_mutex);
}

static const struct wwan_ops iosm_wwan_ops = {
        .priv_size = sizeof(struct iosm_netdev_priv),
        .setup = ipc_wwan_setup,
        .newlink = ipc_wwan_newlink,
        .dellink = ipc_wwan_dellink,
};
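
/* RX path note (editorial addition for clarity): the netdevs are set up as
 * ARPHRD_NONE, so downlink frames are raw IP packets without any L2 header.
 * ipc_wwan_receive() therefore derives skb->protocol from the IP version
 * nibble of the first payload byte (IOSM_IP_TYPE_MASK) before handing the
 * skb to netif_rx().
 */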

int ipc_wwan_receive(struct iosm_wwan *ipc_wwan, struct sk_buff *skb_arg,
                     bool dss, int if_id)
{
        struct sk_buff *skb = skb_arg;
        struct net_device_stats *stats;
        struct iosm_netdev_priv *priv;
        int ret;

        if ((skb->data[0] & IOSM_IP_TYPE_MASK) == IOSM_IP_TYPE_IPV4)
                skb->protocol = htons(ETH_P_IP);
        else if ((skb->data[0] & IOSM_IP_TYPE_MASK) ==
                 IOSM_IP_TYPE_IPV6)
                skb->protocol = htons(ETH_P_IPV6);

        skb->pkt_type = PACKET_HOST;

        if (if_id < IP_MUX_SESSION_START ||
            if_id > IP_MUX_SESSION_END) {
                ret = -EINVAL;
                goto free;
        }

        rcu_read_lock();
        priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
        if (!priv) {
                ret = -EINVAL;
                goto unlock;
        }
        skb->dev = priv->netdev;
        stats = &priv->netdev->stats;
        stats->rx_packets++;
        stats->rx_bytes += skb->len;

        ret = netif_rx(skb);
        skb = NULL;
unlock:
        rcu_read_unlock();
free:
        dev_kfree_skb(skb);
        return ret;
}

void ipc_wwan_tx_flowctrl(struct iosm_wwan *ipc_wwan, int if_id, bool on)
{
        struct net_device *netdev;
        struct iosm_netdev_priv *priv;
        bool is_tx_blk;

        rcu_read_lock();
        priv = rcu_dereference(ipc_wwan->sub_netlist[if_id]);
        if (!priv) {
                rcu_read_unlock();
                return;
        }

        netdev = priv->netdev;

        is_tx_blk = netif_queue_stopped(netdev);

        if (on)
                dev_dbg(ipc_wwan->dev, "session id[%d]: flowctrl enable",
                        if_id);

        if (on && !is_tx_blk)
                netif_stop_queue(netdev);
        else if (!on && is_tx_blk)
                netif_wake_queue(netdev);
        rcu_read_unlock();
}

struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev)
{
        struct iosm_wwan *ipc_wwan;

        ipc_wwan = kzalloc(sizeof(*ipc_wwan), GFP_KERNEL);
        if (!ipc_wwan)
                return NULL;

        ipc_wwan->dev = dev;
        ipc_wwan->ipc_imem = ipc_imem;

        /* Initialize the mutex before registering the WWAN ops: creating the
         * default link below ends up in ipc_wwan_newlink(), which takes it.
         */
        mutex_init(&ipc_wwan->if_mutex);

        /* WWAN core will create a netdev for the default IP MUX channel */
        if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan,
                              IP_MUX_SESSION_DEFAULT)) {
                mutex_destroy(&ipc_wwan->if_mutex);
                kfree(ipc_wwan);
                return NULL;
        }

        return ipc_wwan;
}

void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan)
{
        /* This call will remove all child netdev(s) */
        wwan_unregister_ops(ipc_wwan->dev);

        mutex_destroy(&ipc_wwan->if_mutex);

        kfree(ipc_wwan);
}
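
/* Usage sketch (editorial addition, illustrative only; not part of the
 * driver): the IMEM/MUX layer is expected to drive this interface roughly
 * as follows. The calling context is hypothetical; only the ipc_wwan_*()
 * signatures are taken from this file.
 *
 *	struct iosm_wwan *wwan = ipc_wwan_init(ipc_imem, dev);
 *
 *	// Downlink: hand a decoded IP MUX frame to the network stack.
 *	ipc_wwan_receive(wwan, skb, false, if_id);
 *
 *	// Flow control: pause/resume the TX queue of one session.
 *	ipc_wwan_tx_flowctrl(wwan, if_id, true);
 *	ipc_wwan_tx_flowctrl(wwan, if_id, false);
 *
 *	// Teardown: removes all child netdevs via wwan_unregister_ops().
 *	ipc_wwan_deinit(wwan);
 */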