xref: /openbmc/linux/drivers/net/thunderbolt/main.c (revision e0b65f9b)
10d0950a9SMika Westerberg // SPDX-License-Identifier: GPL-2.0
20d0950a9SMika Westerberg /*
30d0950a9SMika Westerberg  * Networking over Thunderbolt/USB4 cables using USB4NET protocol
40d0950a9SMika Westerberg  * (formerly Apple ThunderboltIP).
50d0950a9SMika Westerberg  *
60d0950a9SMika Westerberg  * Copyright (C) 2017, Intel Corporation
70d0950a9SMika Westerberg  * Authors: Amir Levy <amir.jer.levy@intel.com>
80d0950a9SMika Westerberg  *          Michael Jamet <michael.jamet@intel.com>
90d0950a9SMika Westerberg  *          Mika Westerberg <mika.westerberg@linux.intel.com>
100d0950a9SMika Westerberg  */
110d0950a9SMika Westerberg 
120d0950a9SMika Westerberg #include <linux/atomic.h>
130d0950a9SMika Westerberg #include <linux/highmem.h>
140d0950a9SMika Westerberg #include <linux/if_vlan.h>
150d0950a9SMika Westerberg #include <linux/jhash.h>
160d0950a9SMika Westerberg #include <linux/module.h>
170d0950a9SMika Westerberg #include <linux/etherdevice.h>
180d0950a9SMika Westerberg #include <linux/rtnetlink.h>
190d0950a9SMika Westerberg #include <linux/sizes.h>
200d0950a9SMika Westerberg #include <linux/thunderbolt.h>
210d0950a9SMika Westerberg #include <linux/uuid.h>
220d0950a9SMika Westerberg #include <linux/workqueue.h>
230d0950a9SMika Westerberg 
240d0950a9SMika Westerberg #include <net/ip6_checksum.h>
250d0950a9SMika Westerberg 
26f7586527SMika Westerberg #include "trace.h"
27f7586527SMika Westerberg 
280d0950a9SMika Westerberg /* Protocol timeouts in ms */
290d0950a9SMika Westerberg #define TBNET_LOGIN_DELAY	4500
300d0950a9SMika Westerberg #define TBNET_LOGIN_TIMEOUT	500
310d0950a9SMika Westerberg #define TBNET_LOGOUT_TIMEOUT	1000
320d0950a9SMika Westerberg 
330d0950a9SMika Westerberg #define TBNET_RING_SIZE		256
340d0950a9SMika Westerberg #define TBNET_LOGIN_RETRIES	60
350d0950a9SMika Westerberg #define TBNET_LOGOUT_RETRIES	10
360d0950a9SMika Westerberg #define TBNET_E2E		BIT(0)
370d0950a9SMika Westerberg #define TBNET_MATCH_FRAGS_ID	BIT(1)
380d0950a9SMika Westerberg #define TBNET_64K_FRAMES	BIT(2)
390d0950a9SMika Westerberg #define TBNET_MAX_MTU		SZ_64K
400d0950a9SMika Westerberg #define TBNET_FRAME_SIZE	SZ_4K
410d0950a9SMika Westerberg #define TBNET_MAX_PAYLOAD_SIZE	\
420d0950a9SMika Westerberg 	(TBNET_FRAME_SIZE - sizeof(struct thunderbolt_ip_frame_header))
430d0950a9SMika Westerberg /* Rx packets need to hold space for skb_shared_info */
440d0950a9SMika Westerberg #define TBNET_RX_MAX_SIZE	\
450d0950a9SMika Westerberg 	(TBNET_FRAME_SIZE + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
460d0950a9SMika Westerberg #define TBNET_RX_PAGE_ORDER	get_order(TBNET_RX_MAX_SIZE)
470d0950a9SMika Westerberg #define TBNET_RX_PAGE_SIZE	(PAGE_SIZE << TBNET_RX_PAGE_ORDER)
480d0950a9SMika Westerberg 
490d0950a9SMika Westerberg #define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0))
500d0950a9SMika Westerberg 
510d0950a9SMika Westerberg /**
520d0950a9SMika Westerberg  * struct thunderbolt_ip_frame_header - Header for each Thunderbolt frame
530d0950a9SMika Westerberg  * @frame_size: size of the data with the frame
540d0950a9SMika Westerberg  * @frame_index: running index on the frames
550d0950a9SMika Westerberg  * @frame_id: ID of the frame to match frames to specific packet
560d0950a9SMika Westerberg  * @frame_count: how many frames assembles a full packet
570d0950a9SMika Westerberg  *
580d0950a9SMika Westerberg  * Each data frame passed to the high-speed DMA ring has this header. If
590d0950a9SMika Westerberg  * the XDomain network directory announces that %TBNET_MATCH_FRAGS_ID is
600d0950a9SMika Westerberg  * supported then @frame_id is filled, otherwise it stays %0.
610d0950a9SMika Westerberg  */
620d0950a9SMika Westerberg struct thunderbolt_ip_frame_header {
630d0950a9SMika Westerberg 	__le32 frame_size;
640d0950a9SMika Westerberg 	__le16 frame_index;
650d0950a9SMika Westerberg 	__le16 frame_id;
660d0950a9SMika Westerberg 	__le32 frame_count;
670d0950a9SMika Westerberg };
680d0950a9SMika Westerberg 
690d0950a9SMika Westerberg enum thunderbolt_ip_frame_pdf {
700d0950a9SMika Westerberg 	TBIP_PDF_FRAME_START = 1,
710d0950a9SMika Westerberg 	TBIP_PDF_FRAME_END,
720d0950a9SMika Westerberg };
730d0950a9SMika Westerberg 
740d0950a9SMika Westerberg enum thunderbolt_ip_type {
750d0950a9SMika Westerberg 	TBIP_LOGIN,
760d0950a9SMika Westerberg 	TBIP_LOGIN_RESPONSE,
770d0950a9SMika Westerberg 	TBIP_LOGOUT,
780d0950a9SMika Westerberg 	TBIP_STATUS,
790d0950a9SMika Westerberg };
800d0950a9SMika Westerberg 
810d0950a9SMika Westerberg struct thunderbolt_ip_header {
820d0950a9SMika Westerberg 	u32 route_hi;
830d0950a9SMika Westerberg 	u32 route_lo;
840d0950a9SMika Westerberg 	u32 length_sn;
850d0950a9SMika Westerberg 	uuid_t uuid;
860d0950a9SMika Westerberg 	uuid_t initiator_uuid;
870d0950a9SMika Westerberg 	uuid_t target_uuid;
880d0950a9SMika Westerberg 	u32 type;
890d0950a9SMika Westerberg 	u32 command_id;
900d0950a9SMika Westerberg };
910d0950a9SMika Westerberg 
920d0950a9SMika Westerberg #define TBIP_HDR_LENGTH_MASK		GENMASK(5, 0)
930d0950a9SMika Westerberg #define TBIP_HDR_SN_MASK		GENMASK(28, 27)
940d0950a9SMika Westerberg #define TBIP_HDR_SN_SHIFT		27
950d0950a9SMika Westerberg 
960d0950a9SMika Westerberg struct thunderbolt_ip_login {
970d0950a9SMika Westerberg 	struct thunderbolt_ip_header hdr;
980d0950a9SMika Westerberg 	u32 proto_version;
990d0950a9SMika Westerberg 	u32 transmit_path;
1000d0950a9SMika Westerberg 	u32 reserved[4];
1010d0950a9SMika Westerberg };
1020d0950a9SMika Westerberg 
1030d0950a9SMika Westerberg #define TBIP_LOGIN_PROTO_VERSION	1
1040d0950a9SMika Westerberg 
1050d0950a9SMika Westerberg struct thunderbolt_ip_login_response {
1060d0950a9SMika Westerberg 	struct thunderbolt_ip_header hdr;
1070d0950a9SMika Westerberg 	u32 status;
1080d0950a9SMika Westerberg 	u32 receiver_mac[2];
1090d0950a9SMika Westerberg 	u32 receiver_mac_len;
1100d0950a9SMika Westerberg 	u32 reserved[4];
1110d0950a9SMika Westerberg };
1120d0950a9SMika Westerberg 
1130d0950a9SMika Westerberg struct thunderbolt_ip_logout {
1140d0950a9SMika Westerberg 	struct thunderbolt_ip_header hdr;
1150d0950a9SMika Westerberg };
1160d0950a9SMika Westerberg 
1170d0950a9SMika Westerberg struct thunderbolt_ip_status {
1180d0950a9SMika Westerberg 	struct thunderbolt_ip_header hdr;
1190d0950a9SMika Westerberg 	u32 status;
1200d0950a9SMika Westerberg };
1210d0950a9SMika Westerberg 
1220d0950a9SMika Westerberg struct tbnet_stats {
1230d0950a9SMika Westerberg 	u64 tx_packets;
1240d0950a9SMika Westerberg 	u64 rx_packets;
1250d0950a9SMika Westerberg 	u64 tx_bytes;
1260d0950a9SMika Westerberg 	u64 rx_bytes;
1270d0950a9SMika Westerberg 	u64 rx_errors;
1280d0950a9SMika Westerberg 	u64 tx_errors;
1290d0950a9SMika Westerberg 	u64 rx_length_errors;
1300d0950a9SMika Westerberg 	u64 rx_over_errors;
1310d0950a9SMika Westerberg 	u64 rx_crc_errors;
1320d0950a9SMika Westerberg 	u64 rx_missed_errors;
1330d0950a9SMika Westerberg };
1340d0950a9SMika Westerberg 
1350d0950a9SMika Westerberg struct tbnet_frame {
1360d0950a9SMika Westerberg 	struct net_device *dev;
1370d0950a9SMika Westerberg 	struct page *page;
1380d0950a9SMika Westerberg 	struct ring_frame frame;
1390d0950a9SMika Westerberg };
1400d0950a9SMika Westerberg 
1410d0950a9SMika Westerberg struct tbnet_ring {
1420d0950a9SMika Westerberg 	struct tbnet_frame frames[TBNET_RING_SIZE];
1430d0950a9SMika Westerberg 	unsigned int cons;
1440d0950a9SMika Westerberg 	unsigned int prod;
1450d0950a9SMika Westerberg 	struct tb_ring *ring;
1460d0950a9SMika Westerberg };
1470d0950a9SMika Westerberg 
1480d0950a9SMika Westerberg /**
1490d0950a9SMika Westerberg  * struct tbnet - ThunderboltIP network driver private data
1500d0950a9SMika Westerberg  * @svc: XDomain service the driver is bound to
1519c60f2a4SMika Westerberg  * @xd: XDomain the service belongs to
1520d0950a9SMika Westerberg  * @handler: ThunderboltIP configuration protocol handler
1530d0950a9SMika Westerberg  * @dev: Networking device
1540d0950a9SMika Westerberg  * @napi: NAPI structure for Rx polling
1550d0950a9SMika Westerberg  * @stats: Network statistics
1560d0950a9SMika Westerberg  * @skb: Network packet that is currently processed on Rx path
1570d0950a9SMika Westerberg  * @command_id: ID used for next configuration protocol packet
1580d0950a9SMika Westerberg  * @login_sent: ThunderboltIP login message successfully sent
1590d0950a9SMika Westerberg  * @login_received: ThunderboltIP login message received from the remote
1600d0950a9SMika Westerberg  *		    host
1610d0950a9SMika Westerberg  * @local_transmit_path: HopID we are using to send out packets
1620d0950a9SMika Westerberg  * @remote_transmit_path: HopID the other end is using to send packets to us
1630d0950a9SMika Westerberg  * @connection_lock: Lock serializing access to @login_sent,
1640d0950a9SMika Westerberg  *		     @login_received and @transmit_path.
1650d0950a9SMika Westerberg  * @login_retries: Number of login retries currently done
1660d0950a9SMika Westerberg  * @login_work: Worker to send ThunderboltIP login packets
1670d0950a9SMika Westerberg  * @connected_work: Worker that finalizes the ThunderboltIP connection
1680d0950a9SMika Westerberg  *		    setup and enables DMA paths for high speed data
1690d0950a9SMika Westerberg  *		    transfers
1700d0950a9SMika Westerberg  * @disconnect_work: Worker that handles tearing down the ThunderboltIP
1710d0950a9SMika Westerberg  *		     connection
1720d0950a9SMika Westerberg  * @rx_hdr: Copy of the currently processed Rx frame. Used when a
1730d0950a9SMika Westerberg  *	    network packet consists of multiple Thunderbolt frames.
1740d0950a9SMika Westerberg  *	    In host byte order.
1750d0950a9SMika Westerberg  * @rx_ring: Software ring holding Rx frames
1760d0950a9SMika Westerberg  * @frame_id: Frame ID use for next Tx packet
1770d0950a9SMika Westerberg  *            (if %TBNET_MATCH_FRAGS_ID is supported in both ends)
1780d0950a9SMika Westerberg  * @tx_ring: Software ring holding Tx frames
1790d0950a9SMika Westerberg  */
1800d0950a9SMika Westerberg struct tbnet {
1810d0950a9SMika Westerberg 	const struct tb_service *svc;
1820d0950a9SMika Westerberg 	struct tb_xdomain *xd;
1830d0950a9SMika Westerberg 	struct tb_protocol_handler handler;
1840d0950a9SMika Westerberg 	struct net_device *dev;
1850d0950a9SMika Westerberg 	struct napi_struct napi;
1860d0950a9SMika Westerberg 	struct tbnet_stats stats;
1870d0950a9SMika Westerberg 	struct sk_buff *skb;
1880d0950a9SMika Westerberg 	atomic_t command_id;
1890d0950a9SMika Westerberg 	bool login_sent;
1900d0950a9SMika Westerberg 	bool login_received;
1910d0950a9SMika Westerberg 	int local_transmit_path;
1920d0950a9SMika Westerberg 	int remote_transmit_path;
1930d0950a9SMika Westerberg 	struct mutex connection_lock;
1940d0950a9SMika Westerberg 	int login_retries;
1950d0950a9SMika Westerberg 	struct delayed_work login_work;
1960d0950a9SMika Westerberg 	struct work_struct connected_work;
1970d0950a9SMika Westerberg 	struct work_struct disconnect_work;
1980d0950a9SMika Westerberg 	struct thunderbolt_ip_frame_header rx_hdr;
1990d0950a9SMika Westerberg 	struct tbnet_ring rx_ring;
2000d0950a9SMika Westerberg 	atomic_t frame_id;
2010d0950a9SMika Westerberg 	struct tbnet_ring tx_ring;
2020d0950a9SMika Westerberg };
2030d0950a9SMika Westerberg 
2040d0950a9SMika Westerberg /* Network property directory UUID: c66189ca-1cce-4195-bdb8-49592e5f5a4f */
2050d0950a9SMika Westerberg static const uuid_t tbnet_dir_uuid =
2060d0950a9SMika Westerberg 	UUID_INIT(0xc66189ca, 0x1cce, 0x4195,
2070d0950a9SMika Westerberg 		  0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f);
2080d0950a9SMika Westerberg 
2090d0950a9SMika Westerberg /* ThunderboltIP protocol UUID: 798f589e-3616-8a47-97c6-5664a920c8dd */
2100d0950a9SMika Westerberg static const uuid_t tbnet_svc_uuid =
2110d0950a9SMika Westerberg 	UUID_INIT(0x798f589e, 0x3616, 0x8a47,
2120d0950a9SMika Westerberg 		  0x97, 0xc6, 0x56, 0x64, 0xa9, 0x20, 0xc8, 0xdd);
2130d0950a9SMika Westerberg 
2140d0950a9SMika Westerberg static struct tb_property_dir *tbnet_dir;
2150d0950a9SMika Westerberg 
2160d0950a9SMika Westerberg static bool tbnet_e2e = true;
2170d0950a9SMika Westerberg module_param_named(e2e, tbnet_e2e, bool, 0444);
2180d0950a9SMika Westerberg MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
2190d0950a9SMika Westerberg 
tbnet_fill_header(struct thunderbolt_ip_header * hdr,u64 route,u8 sequence,const uuid_t * initiator_uuid,const uuid_t * target_uuid,enum thunderbolt_ip_type type,size_t size,u32 command_id)2200d0950a9SMika Westerberg static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
2210d0950a9SMika Westerberg 	u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
2220d0950a9SMika Westerberg 	enum thunderbolt_ip_type type, size_t size, u32 command_id)
2230d0950a9SMika Westerberg {
2240d0950a9SMika Westerberg 	u32 length_sn;
2250d0950a9SMika Westerberg 
2260d0950a9SMika Westerberg 	/* Length does not include route_hi/lo and length_sn fields */
2270d0950a9SMika Westerberg 	length_sn = (size - 3 * 4) / 4;
2280d0950a9SMika Westerberg 	length_sn |= (sequence << TBIP_HDR_SN_SHIFT) & TBIP_HDR_SN_MASK;
2290d0950a9SMika Westerberg 
2300d0950a9SMika Westerberg 	hdr->route_hi = upper_32_bits(route);
2310d0950a9SMika Westerberg 	hdr->route_lo = lower_32_bits(route);
2320d0950a9SMika Westerberg 	hdr->length_sn = length_sn;
2330d0950a9SMika Westerberg 	uuid_copy(&hdr->uuid, &tbnet_svc_uuid);
2340d0950a9SMika Westerberg 	uuid_copy(&hdr->initiator_uuid, initiator_uuid);
2350d0950a9SMika Westerberg 	uuid_copy(&hdr->target_uuid, target_uuid);
2360d0950a9SMika Westerberg 	hdr->type = type;
2370d0950a9SMika Westerberg 	hdr->command_id = command_id;
2380d0950a9SMika Westerberg }
2390d0950a9SMika Westerberg 
tbnet_login_response(struct tbnet * net,u64 route,u8 sequence,u32 command_id)2400d0950a9SMika Westerberg static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence,
2410d0950a9SMika Westerberg 				u32 command_id)
2420d0950a9SMika Westerberg {
2430d0950a9SMika Westerberg 	struct thunderbolt_ip_login_response reply;
2440d0950a9SMika Westerberg 	struct tb_xdomain *xd = net->xd;
2450d0950a9SMika Westerberg 
2460d0950a9SMika Westerberg 	memset(&reply, 0, sizeof(reply));
2470d0950a9SMika Westerberg 	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
2480d0950a9SMika Westerberg 			  xd->remote_uuid, TBIP_LOGIN_RESPONSE, sizeof(reply),
2490d0950a9SMika Westerberg 			  command_id);
2500d0950a9SMika Westerberg 	memcpy(reply.receiver_mac, net->dev->dev_addr, ETH_ALEN);
2510d0950a9SMika Westerberg 	reply.receiver_mac_len = ETH_ALEN;
2520d0950a9SMika Westerberg 
2530d0950a9SMika Westerberg 	return tb_xdomain_response(xd, &reply, sizeof(reply),
2540d0950a9SMika Westerberg 				   TB_CFG_PKG_XDOMAIN_RESP);
2550d0950a9SMika Westerberg }
2560d0950a9SMika Westerberg 
tbnet_login_request(struct tbnet * net,u8 sequence)2570d0950a9SMika Westerberg static int tbnet_login_request(struct tbnet *net, u8 sequence)
2580d0950a9SMika Westerberg {
2590d0950a9SMika Westerberg 	struct thunderbolt_ip_login_response reply;
2600d0950a9SMika Westerberg 	struct thunderbolt_ip_login request;
2610d0950a9SMika Westerberg 	struct tb_xdomain *xd = net->xd;
2620d0950a9SMika Westerberg 
2630d0950a9SMika Westerberg 	memset(&request, 0, sizeof(request));
2640d0950a9SMika Westerberg 	tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid,
2650d0950a9SMika Westerberg 			  xd->remote_uuid, TBIP_LOGIN, sizeof(request),
2660d0950a9SMika Westerberg 			  atomic_inc_return(&net->command_id));
2670d0950a9SMika Westerberg 
2680d0950a9SMika Westerberg 	request.proto_version = TBIP_LOGIN_PROTO_VERSION;
2690d0950a9SMika Westerberg 	request.transmit_path = net->local_transmit_path;
2700d0950a9SMika Westerberg 
2710d0950a9SMika Westerberg 	return tb_xdomain_request(xd, &request, sizeof(request),
2720d0950a9SMika Westerberg 				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
2730d0950a9SMika Westerberg 				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
2740d0950a9SMika Westerberg 				  TBNET_LOGIN_TIMEOUT);
2750d0950a9SMika Westerberg }
2760d0950a9SMika Westerberg 
tbnet_logout_response(struct tbnet * net,u64 route,u8 sequence,u32 command_id)2770d0950a9SMika Westerberg static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence,
2780d0950a9SMika Westerberg 				 u32 command_id)
2790d0950a9SMika Westerberg {
2800d0950a9SMika Westerberg 	struct thunderbolt_ip_status reply;
2810d0950a9SMika Westerberg 	struct tb_xdomain *xd = net->xd;
2820d0950a9SMika Westerberg 
2830d0950a9SMika Westerberg 	memset(&reply, 0, sizeof(reply));
2840d0950a9SMika Westerberg 	tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid,
2850d0950a9SMika Westerberg 			  xd->remote_uuid, TBIP_STATUS, sizeof(reply),
2860d0950a9SMika Westerberg 			  atomic_inc_return(&net->command_id));
2870d0950a9SMika Westerberg 	return tb_xdomain_response(xd, &reply, sizeof(reply),
2880d0950a9SMika Westerberg 				   TB_CFG_PKG_XDOMAIN_RESP);
2890d0950a9SMika Westerberg }
2900d0950a9SMika Westerberg 
tbnet_logout_request(struct tbnet * net)2910d0950a9SMika Westerberg static int tbnet_logout_request(struct tbnet *net)
2920d0950a9SMika Westerberg {
2930d0950a9SMika Westerberg 	struct thunderbolt_ip_logout request;
2940d0950a9SMika Westerberg 	struct thunderbolt_ip_status reply;
2950d0950a9SMika Westerberg 	struct tb_xdomain *xd = net->xd;
2960d0950a9SMika Westerberg 
2970d0950a9SMika Westerberg 	memset(&request, 0, sizeof(request));
2980d0950a9SMika Westerberg 	tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid,
2990d0950a9SMika Westerberg 			  xd->remote_uuid, TBIP_LOGOUT, sizeof(request),
3000d0950a9SMika Westerberg 			  atomic_inc_return(&net->command_id));
3010d0950a9SMika Westerberg 
3020d0950a9SMika Westerberg 	return tb_xdomain_request(xd, &request, sizeof(request),
3030d0950a9SMika Westerberg 				  TB_CFG_PKG_XDOMAIN_RESP, &reply,
3040d0950a9SMika Westerberg 				  sizeof(reply), TB_CFG_PKG_XDOMAIN_RESP,
3050d0950a9SMika Westerberg 				  TBNET_LOGOUT_TIMEOUT);
3060d0950a9SMika Westerberg }
3070d0950a9SMika Westerberg 
start_login(struct tbnet * net)3080d0950a9SMika Westerberg static void start_login(struct tbnet *net)
3090d0950a9SMika Westerberg {
3107b3502c1SMika Westerberg 	netdev_dbg(net->dev, "login started\n");
3117b3502c1SMika Westerberg 
3120d0950a9SMika Westerberg 	mutex_lock(&net->connection_lock);
3130d0950a9SMika Westerberg 	net->login_sent = false;
3140d0950a9SMika Westerberg 	net->login_received = false;
3150d0950a9SMika Westerberg 	mutex_unlock(&net->connection_lock);
3160d0950a9SMika Westerberg 
3170d0950a9SMika Westerberg 	queue_delayed_work(system_long_wq, &net->login_work,
3180d0950a9SMika Westerberg 			   msecs_to_jiffies(1000));
3190d0950a9SMika Westerberg }
3200d0950a9SMika Westerberg 
stop_login(struct tbnet * net)3210d0950a9SMika Westerberg static void stop_login(struct tbnet *net)
3220d0950a9SMika Westerberg {
3230d0950a9SMika Westerberg 	cancel_delayed_work_sync(&net->login_work);
3240d0950a9SMika Westerberg 	cancel_work_sync(&net->connected_work);
3257b3502c1SMika Westerberg 
3267b3502c1SMika Westerberg 	netdev_dbg(net->dev, "login stopped\n");
3270d0950a9SMika Westerberg }
3280d0950a9SMika Westerberg 
tbnet_frame_size(const struct tbnet_frame * tf)3290d0950a9SMika Westerberg static inline unsigned int tbnet_frame_size(const struct tbnet_frame *tf)
3300d0950a9SMika Westerberg {
3310d0950a9SMika Westerberg 	return tf->frame.size ? : TBNET_FRAME_SIZE;
3320d0950a9SMika Westerberg }
3330d0950a9SMika Westerberg 
tbnet_free_buffers(struct tbnet_ring * ring)3340d0950a9SMika Westerberg static void tbnet_free_buffers(struct tbnet_ring *ring)
3350d0950a9SMika Westerberg {
3360d0950a9SMika Westerberg 	unsigned int i;
3370d0950a9SMika Westerberg 
3380d0950a9SMika Westerberg 	for (i = 0; i < TBNET_RING_SIZE; i++) {
3390d0950a9SMika Westerberg 		struct device *dma_dev = tb_ring_dma_device(ring->ring);
3400d0950a9SMika Westerberg 		struct tbnet_frame *tf = &ring->frames[i];
3410d0950a9SMika Westerberg 		enum dma_data_direction dir;
3420d0950a9SMika Westerberg 		unsigned int order;
3430d0950a9SMika Westerberg 		size_t size;
3440d0950a9SMika Westerberg 
3450d0950a9SMika Westerberg 		if (!tf->page)
3460d0950a9SMika Westerberg 			continue;
3470d0950a9SMika Westerberg 
3480d0950a9SMika Westerberg 		if (ring->ring->is_tx) {
3490d0950a9SMika Westerberg 			dir = DMA_TO_DEVICE;
3500d0950a9SMika Westerberg 			order = 0;
3510d0950a9SMika Westerberg 			size = TBNET_FRAME_SIZE;
3520d0950a9SMika Westerberg 		} else {
3530d0950a9SMika Westerberg 			dir = DMA_FROM_DEVICE;
3540d0950a9SMika Westerberg 			order = TBNET_RX_PAGE_ORDER;
3550d0950a9SMika Westerberg 			size = TBNET_RX_PAGE_SIZE;
3560d0950a9SMika Westerberg 		}
3570d0950a9SMika Westerberg 
358f7586527SMika Westerberg 		trace_tbnet_free_frame(i, tf->page, tf->frame.buffer_phy, dir);
359f7586527SMika Westerberg 
3600d0950a9SMika Westerberg 		if (tf->frame.buffer_phy)
3610d0950a9SMika Westerberg 			dma_unmap_page(dma_dev, tf->frame.buffer_phy, size,
3620d0950a9SMika Westerberg 				       dir);
3630d0950a9SMika Westerberg 
3640d0950a9SMika Westerberg 		__free_pages(tf->page, order);
3650d0950a9SMika Westerberg 		tf->page = NULL;
3660d0950a9SMika Westerberg 	}
3670d0950a9SMika Westerberg 
3680d0950a9SMika Westerberg 	ring->cons = 0;
3690d0950a9SMika Westerberg 	ring->prod = 0;
3700d0950a9SMika Westerberg }
3710d0950a9SMika Westerberg 
tbnet_tear_down(struct tbnet * net,bool send_logout)3720d0950a9SMika Westerberg static void tbnet_tear_down(struct tbnet *net, bool send_logout)
3730d0950a9SMika Westerberg {
3740d0950a9SMika Westerberg 	netif_carrier_off(net->dev);
3750d0950a9SMika Westerberg 	netif_stop_queue(net->dev);
3760d0950a9SMika Westerberg 
3770d0950a9SMika Westerberg 	stop_login(net);
3780d0950a9SMika Westerberg 
3790d0950a9SMika Westerberg 	mutex_lock(&net->connection_lock);
3800d0950a9SMika Westerberg 
3810d0950a9SMika Westerberg 	if (net->login_sent && net->login_received) {
3820d0950a9SMika Westerberg 		int ret, retries = TBNET_LOGOUT_RETRIES;
3830d0950a9SMika Westerberg 
3840d0950a9SMika Westerberg 		while (send_logout && retries-- > 0) {
3857b3502c1SMika Westerberg 			netdev_dbg(net->dev, "sending logout request %u\n",
3867b3502c1SMika Westerberg 				   retries);
3870d0950a9SMika Westerberg 			ret = tbnet_logout_request(net);
3880d0950a9SMika Westerberg 			if (ret != -ETIMEDOUT)
3890d0950a9SMika Westerberg 				break;
3900d0950a9SMika Westerberg 		}
3910d0950a9SMika Westerberg 
3920d0950a9SMika Westerberg 		tb_ring_stop(net->rx_ring.ring);
3930d0950a9SMika Westerberg 		tb_ring_stop(net->tx_ring.ring);
3940d0950a9SMika Westerberg 		tbnet_free_buffers(&net->rx_ring);
3950d0950a9SMika Westerberg 		tbnet_free_buffers(&net->tx_ring);
3960d0950a9SMika Westerberg 
3970d0950a9SMika Westerberg 		ret = tb_xdomain_disable_paths(net->xd,
3980d0950a9SMika Westerberg 					       net->local_transmit_path,
3990d0950a9SMika Westerberg 					       net->rx_ring.ring->hop,
4000d0950a9SMika Westerberg 					       net->remote_transmit_path,
4010d0950a9SMika Westerberg 					       net->tx_ring.ring->hop);
4020d0950a9SMika Westerberg 		if (ret)
4030d0950a9SMika Westerberg 			netdev_warn(net->dev, "failed to disable DMA paths\n");
4040d0950a9SMika Westerberg 
4050d0950a9SMika Westerberg 		tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
4060d0950a9SMika Westerberg 		net->remote_transmit_path = 0;
4070d0950a9SMika Westerberg 	}
4080d0950a9SMika Westerberg 
4090d0950a9SMika Westerberg 	net->login_retries = 0;
4100d0950a9SMika Westerberg 	net->login_sent = false;
4110d0950a9SMika Westerberg 	net->login_received = false;
4120d0950a9SMika Westerberg 
4137b3502c1SMika Westerberg 	netdev_dbg(net->dev, "network traffic stopped\n");
4147b3502c1SMika Westerberg 
4150d0950a9SMika Westerberg 	mutex_unlock(&net->connection_lock);
4160d0950a9SMika Westerberg }
4170d0950a9SMika Westerberg 
tbnet_handle_packet(const void * buf,size_t size,void * data)4180d0950a9SMika Westerberg static int tbnet_handle_packet(const void *buf, size_t size, void *data)
4190d0950a9SMika Westerberg {
4200d0950a9SMika Westerberg 	const struct thunderbolt_ip_login *pkg = buf;
4210d0950a9SMika Westerberg 	struct tbnet *net = data;
4220d0950a9SMika Westerberg 	u32 command_id;
4230d0950a9SMika Westerberg 	int ret = 0;
4240d0950a9SMika Westerberg 	u32 sequence;
4250d0950a9SMika Westerberg 	u64 route;
4260d0950a9SMika Westerberg 
4270d0950a9SMika Westerberg 	/* Make sure the packet is for us */
4280d0950a9SMika Westerberg 	if (size < sizeof(struct thunderbolt_ip_header))
4290d0950a9SMika Westerberg 		return 0;
4300d0950a9SMika Westerberg 	if (!uuid_equal(&pkg->hdr.initiator_uuid, net->xd->remote_uuid))
4310d0950a9SMika Westerberg 		return 0;
4320d0950a9SMika Westerberg 	if (!uuid_equal(&pkg->hdr.target_uuid, net->xd->local_uuid))
4330d0950a9SMika Westerberg 		return 0;
4340d0950a9SMika Westerberg 
4350d0950a9SMika Westerberg 	route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo;
4360d0950a9SMika Westerberg 	route &= ~BIT_ULL(63);
4370d0950a9SMika Westerberg 	if (route != net->xd->route)
4380d0950a9SMika Westerberg 		return 0;
4390d0950a9SMika Westerberg 
4400d0950a9SMika Westerberg 	sequence = pkg->hdr.length_sn & TBIP_HDR_SN_MASK;
4410d0950a9SMika Westerberg 	sequence >>= TBIP_HDR_SN_SHIFT;
4420d0950a9SMika Westerberg 	command_id = pkg->hdr.command_id;
4430d0950a9SMika Westerberg 
4440d0950a9SMika Westerberg 	switch (pkg->hdr.type) {
4450d0950a9SMika Westerberg 	case TBIP_LOGIN:
4467b3502c1SMika Westerberg 		netdev_dbg(net->dev, "remote login request received\n");
4470d0950a9SMika Westerberg 		if (!netif_running(net->dev))
4480d0950a9SMika Westerberg 			break;
4490d0950a9SMika Westerberg 
4500d0950a9SMika Westerberg 		ret = tbnet_login_response(net, route, sequence,
4510d0950a9SMika Westerberg 					   pkg->hdr.command_id);
4520d0950a9SMika Westerberg 		if (!ret) {
4537b3502c1SMika Westerberg 			netdev_dbg(net->dev, "remote login response sent\n");
4547b3502c1SMika Westerberg 
4550d0950a9SMika Westerberg 			mutex_lock(&net->connection_lock);
4560d0950a9SMika Westerberg 			net->login_received = true;
4570d0950a9SMika Westerberg 			net->remote_transmit_path = pkg->transmit_path;
4580d0950a9SMika Westerberg 
4590d0950a9SMika Westerberg 			/* If we reached the number of max retries or
4600d0950a9SMika Westerberg 			 * previous logout, schedule another round of
4610d0950a9SMika Westerberg 			 * login retries
4620d0950a9SMika Westerberg 			 */
4630d0950a9SMika Westerberg 			if (net->login_retries >= TBNET_LOGIN_RETRIES ||
4640d0950a9SMika Westerberg 			    !net->login_sent) {
4650d0950a9SMika Westerberg 				net->login_retries = 0;
4660d0950a9SMika Westerberg 				queue_delayed_work(system_long_wq,
4670d0950a9SMika Westerberg 						   &net->login_work, 0);
4680d0950a9SMika Westerberg 			}
4690d0950a9SMika Westerberg 			mutex_unlock(&net->connection_lock);
4700d0950a9SMika Westerberg 
4710d0950a9SMika Westerberg 			queue_work(system_long_wq, &net->connected_work);
4720d0950a9SMika Westerberg 		}
4730d0950a9SMika Westerberg 		break;
4740d0950a9SMika Westerberg 
4750d0950a9SMika Westerberg 	case TBIP_LOGOUT:
4767b3502c1SMika Westerberg 		netdev_dbg(net->dev, "remote logout request received\n");
4770d0950a9SMika Westerberg 		ret = tbnet_logout_response(net, route, sequence, command_id);
4787b3502c1SMika Westerberg 		if (!ret) {
4797b3502c1SMika Westerberg 			netdev_dbg(net->dev, "remote logout response sent\n");
4800d0950a9SMika Westerberg 			queue_work(system_long_wq, &net->disconnect_work);
4817b3502c1SMika Westerberg 		}
4820d0950a9SMika Westerberg 		break;
4830d0950a9SMika Westerberg 
4840d0950a9SMika Westerberg 	default:
4850d0950a9SMika Westerberg 		return 0;
4860d0950a9SMika Westerberg 	}
4870d0950a9SMika Westerberg 
4880d0950a9SMika Westerberg 	if (ret)
4890d0950a9SMika Westerberg 		netdev_warn(net->dev, "failed to send ThunderboltIP response\n");
4900d0950a9SMika Westerberg 
4910d0950a9SMika Westerberg 	return 1;
4920d0950a9SMika Westerberg }
4930d0950a9SMika Westerberg 
tbnet_available_buffers(const struct tbnet_ring * ring)4940d0950a9SMika Westerberg static unsigned int tbnet_available_buffers(const struct tbnet_ring *ring)
4950d0950a9SMika Westerberg {
4960d0950a9SMika Westerberg 	return ring->prod - ring->cons;
4970d0950a9SMika Westerberg }
4980d0950a9SMika Westerberg 
tbnet_alloc_rx_buffers(struct tbnet * net,unsigned int nbuffers)4990d0950a9SMika Westerberg static int tbnet_alloc_rx_buffers(struct tbnet *net, unsigned int nbuffers)
5000d0950a9SMika Westerberg {
5010d0950a9SMika Westerberg 	struct tbnet_ring *ring = &net->rx_ring;
5020d0950a9SMika Westerberg 	int ret;
5030d0950a9SMika Westerberg 
5040d0950a9SMika Westerberg 	while (nbuffers--) {
5050d0950a9SMika Westerberg 		struct device *dma_dev = tb_ring_dma_device(ring->ring);
5060d0950a9SMika Westerberg 		unsigned int index = ring->prod & (TBNET_RING_SIZE - 1);
5070d0950a9SMika Westerberg 		struct tbnet_frame *tf = &ring->frames[index];
5080d0950a9SMika Westerberg 		dma_addr_t dma_addr;
5090d0950a9SMika Westerberg 
5100d0950a9SMika Westerberg 		if (tf->page)
5110d0950a9SMika Westerberg 			break;
5120d0950a9SMika Westerberg 
5130d0950a9SMika Westerberg 		/* Allocate page (order > 0) so that it can hold maximum
5140d0950a9SMika Westerberg 		 * ThunderboltIP frame (4kB) and the additional room for
5150d0950a9SMika Westerberg 		 * SKB shared info required by build_skb().
5160d0950a9SMika Westerberg 		 */
5170d0950a9SMika Westerberg 		tf->page = dev_alloc_pages(TBNET_RX_PAGE_ORDER);
5180d0950a9SMika Westerberg 		if (!tf->page) {
5190d0950a9SMika Westerberg 			ret = -ENOMEM;
5200d0950a9SMika Westerberg 			goto err_free;
5210d0950a9SMika Westerberg 		}
5220d0950a9SMika Westerberg 
5230d0950a9SMika Westerberg 		dma_addr = dma_map_page(dma_dev, tf->page, 0,
5240d0950a9SMika Westerberg 					TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
5250d0950a9SMika Westerberg 		if (dma_mapping_error(dma_dev, dma_addr)) {
5260d0950a9SMika Westerberg 			ret = -ENOMEM;
5270d0950a9SMika Westerberg 			goto err_free;
5280d0950a9SMika Westerberg 		}
5290d0950a9SMika Westerberg 
5300d0950a9SMika Westerberg 		tf->frame.buffer_phy = dma_addr;
5310d0950a9SMika Westerberg 		tf->dev = net->dev;
5320d0950a9SMika Westerberg 
533f7586527SMika Westerberg 		trace_tbnet_alloc_rx_frame(index, tf->page, dma_addr,
534f7586527SMika Westerberg 					   DMA_FROM_DEVICE);
535f7586527SMika Westerberg 
5360d0950a9SMika Westerberg 		tb_ring_rx(ring->ring, &tf->frame);
5370d0950a9SMika Westerberg 
5380d0950a9SMika Westerberg 		ring->prod++;
5390d0950a9SMika Westerberg 	}
5400d0950a9SMika Westerberg 
5410d0950a9SMika Westerberg 	return 0;
5420d0950a9SMika Westerberg 
5430d0950a9SMika Westerberg err_free:
5440d0950a9SMika Westerberg 	tbnet_free_buffers(ring);
5450d0950a9SMika Westerberg 	return ret;
5460d0950a9SMika Westerberg }
5470d0950a9SMika Westerberg 
tbnet_get_tx_buffer(struct tbnet * net)5480d0950a9SMika Westerberg static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
5490d0950a9SMika Westerberg {
5500d0950a9SMika Westerberg 	struct tbnet_ring *ring = &net->tx_ring;
5510d0950a9SMika Westerberg 	struct device *dma_dev = tb_ring_dma_device(ring->ring);
5520d0950a9SMika Westerberg 	struct tbnet_frame *tf;
5530d0950a9SMika Westerberg 	unsigned int index;
5540d0950a9SMika Westerberg 
5550d0950a9SMika Westerberg 	if (!tbnet_available_buffers(ring))
5560d0950a9SMika Westerberg 		return NULL;
5570d0950a9SMika Westerberg 
5580d0950a9SMika Westerberg 	index = ring->cons++ & (TBNET_RING_SIZE - 1);
5590d0950a9SMika Westerberg 
5600d0950a9SMika Westerberg 	tf = &ring->frames[index];
5610d0950a9SMika Westerberg 	tf->frame.size = 0;
5620d0950a9SMika Westerberg 
5630d0950a9SMika Westerberg 	dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
5640d0950a9SMika Westerberg 				tbnet_frame_size(tf), DMA_TO_DEVICE);
5650d0950a9SMika Westerberg 
5660d0950a9SMika Westerberg 	return tf;
5670d0950a9SMika Westerberg }
5680d0950a9SMika Westerberg 
tbnet_tx_callback(struct tb_ring * ring,struct ring_frame * frame,bool canceled)5690d0950a9SMika Westerberg static void tbnet_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
5700d0950a9SMika Westerberg 			      bool canceled)
5710d0950a9SMika Westerberg {
5720d0950a9SMika Westerberg 	struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
5730d0950a9SMika Westerberg 	struct tbnet *net = netdev_priv(tf->dev);
5740d0950a9SMika Westerberg 
5750d0950a9SMika Westerberg 	/* Return buffer to the ring */
5760d0950a9SMika Westerberg 	net->tx_ring.prod++;
5770d0950a9SMika Westerberg 
5780d0950a9SMika Westerberg 	if (tbnet_available_buffers(&net->tx_ring) >= TBNET_RING_SIZE / 2)
5790d0950a9SMika Westerberg 		netif_wake_queue(net->dev);
5800d0950a9SMika Westerberg }
5810d0950a9SMika Westerberg 
tbnet_alloc_tx_buffers(struct tbnet * net)5820d0950a9SMika Westerberg static int tbnet_alloc_tx_buffers(struct tbnet *net)
5830d0950a9SMika Westerberg {
5840d0950a9SMika Westerberg 	struct tbnet_ring *ring = &net->tx_ring;
5850d0950a9SMika Westerberg 	struct device *dma_dev = tb_ring_dma_device(ring->ring);
5860d0950a9SMika Westerberg 	unsigned int i;
5870d0950a9SMika Westerberg 
5880d0950a9SMika Westerberg 	for (i = 0; i < TBNET_RING_SIZE; i++) {
5890d0950a9SMika Westerberg 		struct tbnet_frame *tf = &ring->frames[i];
5900d0950a9SMika Westerberg 		dma_addr_t dma_addr;
5910d0950a9SMika Westerberg 
5920d0950a9SMika Westerberg 		tf->page = alloc_page(GFP_KERNEL);
5930d0950a9SMika Westerberg 		if (!tf->page) {
5940d0950a9SMika Westerberg 			tbnet_free_buffers(ring);
5950d0950a9SMika Westerberg 			return -ENOMEM;
5960d0950a9SMika Westerberg 		}
5970d0950a9SMika Westerberg 
5980d0950a9SMika Westerberg 		dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
5990d0950a9SMika Westerberg 					DMA_TO_DEVICE);
6000d0950a9SMika Westerberg 		if (dma_mapping_error(dma_dev, dma_addr)) {
6010d0950a9SMika Westerberg 			__free_page(tf->page);
6020d0950a9SMika Westerberg 			tf->page = NULL;
6030d0950a9SMika Westerberg 			tbnet_free_buffers(ring);
6040d0950a9SMika Westerberg 			return -ENOMEM;
6050d0950a9SMika Westerberg 		}
6060d0950a9SMika Westerberg 
6070d0950a9SMika Westerberg 		tf->dev = net->dev;
6080d0950a9SMika Westerberg 		tf->frame.buffer_phy = dma_addr;
6090d0950a9SMika Westerberg 		tf->frame.callback = tbnet_tx_callback;
6100d0950a9SMika Westerberg 		tf->frame.sof = TBIP_PDF_FRAME_START;
6110d0950a9SMika Westerberg 		tf->frame.eof = TBIP_PDF_FRAME_END;
612f7586527SMika Westerberg 
613f7586527SMika Westerberg 		trace_tbnet_alloc_tx_frame(i, tf->page, dma_addr, DMA_TO_DEVICE);
6140d0950a9SMika Westerberg 	}
6150d0950a9SMika Westerberg 
6160d0950a9SMika Westerberg 	ring->cons = 0;
6170d0950a9SMika Westerberg 	ring->prod = TBNET_RING_SIZE - 1;
6180d0950a9SMika Westerberg 
6190d0950a9SMika Westerberg 	return 0;
6200d0950a9SMika Westerberg }
6210d0950a9SMika Westerberg 
tbnet_connected_work(struct work_struct * work)6220d0950a9SMika Westerberg static void tbnet_connected_work(struct work_struct *work)
6230d0950a9SMika Westerberg {
6240d0950a9SMika Westerberg 	struct tbnet *net = container_of(work, typeof(*net), connected_work);
6250d0950a9SMika Westerberg 	bool connected;
6260d0950a9SMika Westerberg 	int ret;
6270d0950a9SMika Westerberg 
6280d0950a9SMika Westerberg 	if (netif_carrier_ok(net->dev))
6290d0950a9SMika Westerberg 		return;
6300d0950a9SMika Westerberg 
6310d0950a9SMika Westerberg 	mutex_lock(&net->connection_lock);
6320d0950a9SMika Westerberg 	connected = net->login_sent && net->login_received;
6330d0950a9SMika Westerberg 	mutex_unlock(&net->connection_lock);
6340d0950a9SMika Westerberg 
6350d0950a9SMika Westerberg 	if (!connected)
6360d0950a9SMika Westerberg 		return;
6370d0950a9SMika Westerberg 
6387b3502c1SMika Westerberg 	netdev_dbg(net->dev, "login successful, enabling paths\n");
6397b3502c1SMika Westerberg 
6400d0950a9SMika Westerberg 	ret = tb_xdomain_alloc_in_hopid(net->xd, net->remote_transmit_path);
6410d0950a9SMika Westerberg 	if (ret != net->remote_transmit_path) {
6420d0950a9SMika Westerberg 		netdev_err(net->dev, "failed to allocate Rx HopID\n");
6430d0950a9SMika Westerberg 		return;
6440d0950a9SMika Westerberg 	}
6450d0950a9SMika Westerberg 
6460d0950a9SMika Westerberg 	/* Both logins successful so enable the rings, high-speed DMA
6470d0950a9SMika Westerberg 	 * paths and start the network device queue.
6480d0950a9SMika Westerberg 	 *
6490d0950a9SMika Westerberg 	 * Note we enable the DMA paths last to make sure we have primed
6500d0950a9SMika Westerberg 	 * the Rx ring before any incoming packets are allowed to
6510d0950a9SMika Westerberg 	 * arrive.
6520d0950a9SMika Westerberg 	 */
6530d0950a9SMika Westerberg 	tb_ring_start(net->tx_ring.ring);
6540d0950a9SMika Westerberg 	tb_ring_start(net->rx_ring.ring);
6550d0950a9SMika Westerberg 
6560d0950a9SMika Westerberg 	ret = tbnet_alloc_rx_buffers(net, TBNET_RING_SIZE);
6570d0950a9SMika Westerberg 	if (ret)
6580d0950a9SMika Westerberg 		goto err_stop_rings;
6590d0950a9SMika Westerberg 
6600d0950a9SMika Westerberg 	ret = tbnet_alloc_tx_buffers(net);
6610d0950a9SMika Westerberg 	if (ret)
6620d0950a9SMika Westerberg 		goto err_free_rx_buffers;
6630d0950a9SMika Westerberg 
6640d0950a9SMika Westerberg 	ret = tb_xdomain_enable_paths(net->xd, net->local_transmit_path,
6650d0950a9SMika Westerberg 				      net->rx_ring.ring->hop,
6660d0950a9SMika Westerberg 				      net->remote_transmit_path,
6670d0950a9SMika Westerberg 				      net->tx_ring.ring->hop);
6680d0950a9SMika Westerberg 	if (ret) {
6690d0950a9SMika Westerberg 		netdev_err(net->dev, "failed to enable DMA paths\n");
6700d0950a9SMika Westerberg 		goto err_free_tx_buffers;
6710d0950a9SMika Westerberg 	}
6720d0950a9SMika Westerberg 
6730d0950a9SMika Westerberg 	netif_carrier_on(net->dev);
6740d0950a9SMika Westerberg 	netif_start_queue(net->dev);
6757b3502c1SMika Westerberg 
6767b3502c1SMika Westerberg 	netdev_dbg(net->dev, "network traffic started\n");
6770d0950a9SMika Westerberg 	return;
6780d0950a9SMika Westerberg 
6790d0950a9SMika Westerberg err_free_tx_buffers:
6800d0950a9SMika Westerberg 	tbnet_free_buffers(&net->tx_ring);
6810d0950a9SMika Westerberg err_free_rx_buffers:
6820d0950a9SMika Westerberg 	tbnet_free_buffers(&net->rx_ring);
6830d0950a9SMika Westerberg err_stop_rings:
6840d0950a9SMika Westerberg 	tb_ring_stop(net->rx_ring.ring);
6850d0950a9SMika Westerberg 	tb_ring_stop(net->tx_ring.ring);
6860d0950a9SMika Westerberg 	tb_xdomain_release_in_hopid(net->xd, net->remote_transmit_path);
6870d0950a9SMika Westerberg }
6880d0950a9SMika Westerberg 
tbnet_login_work(struct work_struct * work)6890d0950a9SMika Westerberg static void tbnet_login_work(struct work_struct *work)
6900d0950a9SMika Westerberg {
6910d0950a9SMika Westerberg 	struct tbnet *net = container_of(work, typeof(*net), login_work.work);
6920d0950a9SMika Westerberg 	unsigned long delay = msecs_to_jiffies(TBNET_LOGIN_DELAY);
6930d0950a9SMika Westerberg 	int ret;
6940d0950a9SMika Westerberg 
6950d0950a9SMika Westerberg 	if (netif_carrier_ok(net->dev))
6960d0950a9SMika Westerberg 		return;
6970d0950a9SMika Westerberg 
6987b3502c1SMika Westerberg 	netdev_dbg(net->dev, "sending login request, retries=%u\n",
6997b3502c1SMika Westerberg 		   net->login_retries);
7007b3502c1SMika Westerberg 
7010d0950a9SMika Westerberg 	ret = tbnet_login_request(net, net->login_retries % 4);
7020d0950a9SMika Westerberg 	if (ret) {
7037b3502c1SMika Westerberg 		netdev_dbg(net->dev, "sending login request failed, ret=%d\n",
7047b3502c1SMika Westerberg 			   ret);
7050d0950a9SMika Westerberg 		if (net->login_retries++ < TBNET_LOGIN_RETRIES) {
7060d0950a9SMika Westerberg 			queue_delayed_work(system_long_wq, &net->login_work,
7070d0950a9SMika Westerberg 					   delay);
7080d0950a9SMika Westerberg 		} else {
7090d0950a9SMika Westerberg 			netdev_info(net->dev, "ThunderboltIP login timed out\n");
7100d0950a9SMika Westerberg 		}
7110d0950a9SMika Westerberg 	} else {
7127b3502c1SMika Westerberg 		netdev_dbg(net->dev, "received login reply\n");
7137b3502c1SMika Westerberg 
7140d0950a9SMika Westerberg 		net->login_retries = 0;
7150d0950a9SMika Westerberg 
7160d0950a9SMika Westerberg 		mutex_lock(&net->connection_lock);
7170d0950a9SMika Westerberg 		net->login_sent = true;
7180d0950a9SMika Westerberg 		mutex_unlock(&net->connection_lock);
7190d0950a9SMika Westerberg 
7200d0950a9SMika Westerberg 		queue_work(system_long_wq, &net->connected_work);
7210d0950a9SMika Westerberg 	}
7220d0950a9SMika Westerberg }
7230d0950a9SMika Westerberg 
tbnet_disconnect_work(struct work_struct * work)7240d0950a9SMika Westerberg static void tbnet_disconnect_work(struct work_struct *work)
7250d0950a9SMika Westerberg {
7260d0950a9SMika Westerberg 	struct tbnet *net = container_of(work, typeof(*net), disconnect_work);
7270d0950a9SMika Westerberg 
7280d0950a9SMika Westerberg 	tbnet_tear_down(net, false);
7290d0950a9SMika Westerberg }
7300d0950a9SMika Westerberg 
tbnet_check_frame(struct tbnet * net,const struct tbnet_frame * tf,const struct thunderbolt_ip_frame_header * hdr)7310d0950a9SMika Westerberg static bool tbnet_check_frame(struct tbnet *net, const struct tbnet_frame *tf,
7320d0950a9SMika Westerberg 			      const struct thunderbolt_ip_frame_header *hdr)
7330d0950a9SMika Westerberg {
7340d0950a9SMika Westerberg 	u32 frame_id, frame_count, frame_size, frame_index;
7350d0950a9SMika Westerberg 	unsigned int size;
7360d0950a9SMika Westerberg 
7370d0950a9SMika Westerberg 	if (tf->frame.flags & RING_DESC_CRC_ERROR) {
7380d0950a9SMika Westerberg 		net->stats.rx_crc_errors++;
7390d0950a9SMika Westerberg 		return false;
7400d0950a9SMika Westerberg 	} else if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) {
7410d0950a9SMika Westerberg 		net->stats.rx_over_errors++;
7420d0950a9SMika Westerberg 		return false;
7430d0950a9SMika Westerberg 	}
7440d0950a9SMika Westerberg 
7450d0950a9SMika Westerberg 	/* Should be greater than just header i.e. contains data */
7460d0950a9SMika Westerberg 	size = tbnet_frame_size(tf);
7470d0950a9SMika Westerberg 	if (size <= sizeof(*hdr)) {
7480d0950a9SMika Westerberg 		net->stats.rx_length_errors++;
7490d0950a9SMika Westerberg 		return false;
7500d0950a9SMika Westerberg 	}
7510d0950a9SMika Westerberg 
7520d0950a9SMika Westerberg 	frame_count = le32_to_cpu(hdr->frame_count);
7530d0950a9SMika Westerberg 	frame_size = le32_to_cpu(hdr->frame_size);
7540d0950a9SMika Westerberg 	frame_index = le16_to_cpu(hdr->frame_index);
7550d0950a9SMika Westerberg 	frame_id = le16_to_cpu(hdr->frame_id);
7560d0950a9SMika Westerberg 
7570d0950a9SMika Westerberg 	if ((frame_size > size - sizeof(*hdr)) || !frame_size) {
7580d0950a9SMika Westerberg 		net->stats.rx_length_errors++;
7590d0950a9SMika Westerberg 		return false;
7600d0950a9SMika Westerberg 	}
7610d0950a9SMika Westerberg 
7620d0950a9SMika Westerberg 	/* In case we're in the middle of packet, validate the frame
7630d0950a9SMika Westerberg 	 * header based on first fragment of the packet.
7640d0950a9SMika Westerberg 	 */
7650d0950a9SMika Westerberg 	if (net->skb && net->rx_hdr.frame_count) {
7660d0950a9SMika Westerberg 		/* Check the frame count fits the count field */
76718536722SMika Westerberg 		if (frame_count != le32_to_cpu(net->rx_hdr.frame_count)) {
7680d0950a9SMika Westerberg 			net->stats.rx_length_errors++;
7690d0950a9SMika Westerberg 			return false;
7700d0950a9SMika Westerberg 		}
7710d0950a9SMika Westerberg 
7720d0950a9SMika Westerberg 		/* Check the frame identifiers are incremented correctly,
7730d0950a9SMika Westerberg 		 * and id is matching.
7740d0950a9SMika Westerberg 		 */
77518536722SMika Westerberg 		if (frame_index != le16_to_cpu(net->rx_hdr.frame_index) + 1 ||
77618536722SMika Westerberg 		    frame_id != le16_to_cpu(net->rx_hdr.frame_id)) {
7770d0950a9SMika Westerberg 			net->stats.rx_missed_errors++;
7780d0950a9SMika Westerberg 			return false;
7790d0950a9SMika Westerberg 		}
7800d0950a9SMika Westerberg 
7810d0950a9SMika Westerberg 		if (net->skb->len + frame_size > TBNET_MAX_MTU) {
7820d0950a9SMika Westerberg 			net->stats.rx_length_errors++;
7830d0950a9SMika Westerberg 			return false;
7840d0950a9SMika Westerberg 		}
7850d0950a9SMika Westerberg 
7860d0950a9SMika Westerberg 		return true;
7870d0950a9SMika Westerberg 	}
7880d0950a9SMika Westerberg 
7890d0950a9SMika Westerberg 	/* Start of packet, validate the frame header */
7900d0950a9SMika Westerberg 	if (frame_count == 0 || frame_count > TBNET_RING_SIZE / 4) {
7910d0950a9SMika Westerberg 		net->stats.rx_length_errors++;
7920d0950a9SMika Westerberg 		return false;
7930d0950a9SMika Westerberg 	}
7940d0950a9SMika Westerberg 	if (frame_index != 0) {
7950d0950a9SMika Westerberg 		net->stats.rx_missed_errors++;
7960d0950a9SMika Westerberg 		return false;
7970d0950a9SMika Westerberg 	}
7980d0950a9SMika Westerberg 
7990d0950a9SMika Westerberg 	return true;
8000d0950a9SMika Westerberg }
8010d0950a9SMika Westerberg 
tbnet_poll(struct napi_struct * napi,int budget)8020d0950a9SMika Westerberg static int tbnet_poll(struct napi_struct *napi, int budget)
8030d0950a9SMika Westerberg {
8040d0950a9SMika Westerberg 	struct tbnet *net = container_of(napi, struct tbnet, napi);
8050d0950a9SMika Westerberg 	unsigned int cleaned_count = tbnet_available_buffers(&net->rx_ring);
8060d0950a9SMika Westerberg 	struct device *dma_dev = tb_ring_dma_device(net->rx_ring.ring);
8070d0950a9SMika Westerberg 	unsigned int rx_packets = 0;
8080d0950a9SMika Westerberg 
8090d0950a9SMika Westerberg 	while (rx_packets < budget) {
8100d0950a9SMika Westerberg 		const struct thunderbolt_ip_frame_header *hdr;
8110d0950a9SMika Westerberg 		unsigned int hdr_size = sizeof(*hdr);
8120d0950a9SMika Westerberg 		struct sk_buff *skb = NULL;
8130d0950a9SMika Westerberg 		struct ring_frame *frame;
8140d0950a9SMika Westerberg 		struct tbnet_frame *tf;
8150d0950a9SMika Westerberg 		struct page *page;
8160d0950a9SMika Westerberg 		bool last = true;
8170d0950a9SMika Westerberg 		u32 frame_size;
8180d0950a9SMika Westerberg 
8190d0950a9SMika Westerberg 		/* Return some buffers to hardware, one at a time is too
8200d0950a9SMika Westerberg 		 * slow so allocate MAX_SKB_FRAGS buffers at the same
8210d0950a9SMika Westerberg 		 * time.
8220d0950a9SMika Westerberg 		 */
8230d0950a9SMika Westerberg 		if (cleaned_count >= MAX_SKB_FRAGS) {
8240d0950a9SMika Westerberg 			tbnet_alloc_rx_buffers(net, cleaned_count);
8250d0950a9SMika Westerberg 			cleaned_count = 0;
8260d0950a9SMika Westerberg 		}
8270d0950a9SMika Westerberg 
8280d0950a9SMika Westerberg 		frame = tb_ring_poll(net->rx_ring.ring);
8290d0950a9SMika Westerberg 		if (!frame)
8300d0950a9SMika Westerberg 			break;
8310d0950a9SMika Westerberg 
8320d0950a9SMika Westerberg 		dma_unmap_page(dma_dev, frame->buffer_phy,
8330d0950a9SMika Westerberg 			       TBNET_RX_PAGE_SIZE, DMA_FROM_DEVICE);
8340d0950a9SMika Westerberg 
8350d0950a9SMika Westerberg 		tf = container_of(frame, typeof(*tf), frame);
8360d0950a9SMika Westerberg 
8370d0950a9SMika Westerberg 		page = tf->page;
8380d0950a9SMika Westerberg 		tf->page = NULL;
8390d0950a9SMika Westerberg 		net->rx_ring.cons++;
8400d0950a9SMika Westerberg 		cleaned_count++;
8410d0950a9SMika Westerberg 
8420d0950a9SMika Westerberg 		hdr = page_address(page);
8430d0950a9SMika Westerberg 		if (!tbnet_check_frame(net, tf, hdr)) {
844f7586527SMika Westerberg 			trace_tbnet_invalid_rx_ip_frame(hdr->frame_size,
845f7586527SMika Westerberg 				hdr->frame_id, hdr->frame_index, hdr->frame_count);
8460d0950a9SMika Westerberg 			__free_pages(page, TBNET_RX_PAGE_ORDER);
8470d0950a9SMika Westerberg 			dev_kfree_skb_any(net->skb);
8480d0950a9SMika Westerberg 			net->skb = NULL;
8490d0950a9SMika Westerberg 			continue;
8500d0950a9SMika Westerberg 		}
8510d0950a9SMika Westerberg 
852f7586527SMika Westerberg 		trace_tbnet_rx_ip_frame(hdr->frame_size, hdr->frame_id,
853f7586527SMika Westerberg 					hdr->frame_index, hdr->frame_count);
8540d0950a9SMika Westerberg 		frame_size = le32_to_cpu(hdr->frame_size);
8550d0950a9SMika Westerberg 
8560d0950a9SMika Westerberg 		skb = net->skb;
8570d0950a9SMika Westerberg 		if (!skb) {
8580d0950a9SMika Westerberg 			skb = build_skb(page_address(page),
8590d0950a9SMika Westerberg 					TBNET_RX_PAGE_SIZE);
8600d0950a9SMika Westerberg 			if (!skb) {
8610d0950a9SMika Westerberg 				__free_pages(page, TBNET_RX_PAGE_ORDER);
8620d0950a9SMika Westerberg 				net->stats.rx_errors++;
8630d0950a9SMika Westerberg 				break;
8640d0950a9SMika Westerberg 			}
8650d0950a9SMika Westerberg 
8660d0950a9SMika Westerberg 			skb_reserve(skb, hdr_size);
8670d0950a9SMika Westerberg 			skb_put(skb, frame_size);
8680d0950a9SMika Westerberg 
8690d0950a9SMika Westerberg 			net->skb = skb;
8700d0950a9SMika Westerberg 		} else {
8710d0950a9SMika Westerberg 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
8720d0950a9SMika Westerberg 					page, hdr_size, frame_size,
8730d0950a9SMika Westerberg 					TBNET_RX_PAGE_SIZE - hdr_size);
8740d0950a9SMika Westerberg 		}
8750d0950a9SMika Westerberg 
87618536722SMika Westerberg 		net->rx_hdr.frame_size = hdr->frame_size;
87718536722SMika Westerberg 		net->rx_hdr.frame_count = hdr->frame_count;
87818536722SMika Westerberg 		net->rx_hdr.frame_index = hdr->frame_index;
87918536722SMika Westerberg 		net->rx_hdr.frame_id = hdr->frame_id;
88018536722SMika Westerberg 		last = le16_to_cpu(net->rx_hdr.frame_index) ==
88118536722SMika Westerberg 		       le32_to_cpu(net->rx_hdr.frame_count) - 1;
8820d0950a9SMika Westerberg 
8830d0950a9SMika Westerberg 		rx_packets++;
8840d0950a9SMika Westerberg 		net->stats.rx_bytes += frame_size;
8850d0950a9SMika Westerberg 
8860d0950a9SMika Westerberg 		if (last) {
8870d0950a9SMika Westerberg 			skb->protocol = eth_type_trans(skb, net->dev);
888f7586527SMika Westerberg 			trace_tbnet_rx_skb(skb);
8890d0950a9SMika Westerberg 			napi_gro_receive(&net->napi, skb);
8900d0950a9SMika Westerberg 			net->skb = NULL;
8910d0950a9SMika Westerberg 		}
8920d0950a9SMika Westerberg 	}
8930d0950a9SMika Westerberg 
8940d0950a9SMika Westerberg 	net->stats.rx_packets += rx_packets;
8950d0950a9SMika Westerberg 
8960d0950a9SMika Westerberg 	if (cleaned_count)
8970d0950a9SMika Westerberg 		tbnet_alloc_rx_buffers(net, cleaned_count);
8980d0950a9SMika Westerberg 
8990d0950a9SMika Westerberg 	if (rx_packets >= budget)
9000d0950a9SMika Westerberg 		return budget;
9010d0950a9SMika Westerberg 
9020d0950a9SMika Westerberg 	napi_complete_done(napi, rx_packets);
9030d0950a9SMika Westerberg 	/* Re-enable the ring interrupt */
9040d0950a9SMika Westerberg 	tb_ring_poll_complete(net->rx_ring.ring);
9050d0950a9SMika Westerberg 
9060d0950a9SMika Westerberg 	return rx_packets;
9070d0950a9SMika Westerberg }
9080d0950a9SMika Westerberg 
tbnet_start_poll(void * data)9090d0950a9SMika Westerberg static void tbnet_start_poll(void *data)
9100d0950a9SMika Westerberg {
9110d0950a9SMika Westerberg 	struct tbnet *net = data;
9120d0950a9SMika Westerberg 
9130d0950a9SMika Westerberg 	napi_schedule(&net->napi);
9140d0950a9SMika Westerberg }
9150d0950a9SMika Westerberg 
tbnet_open(struct net_device * dev)9160d0950a9SMika Westerberg static int tbnet_open(struct net_device *dev)
9170d0950a9SMika Westerberg {
9180d0950a9SMika Westerberg 	struct tbnet *net = netdev_priv(dev);
9190d0950a9SMika Westerberg 	struct tb_xdomain *xd = net->xd;
9200d0950a9SMika Westerberg 	u16 sof_mask, eof_mask;
9210d0950a9SMika Westerberg 	struct tb_ring *ring;
9220d0950a9SMika Westerberg 	unsigned int flags;
9230d0950a9SMika Westerberg 	int hopid;
9240d0950a9SMika Westerberg 
9250d0950a9SMika Westerberg 	netif_carrier_off(dev);
9260d0950a9SMika Westerberg 
9270d0950a9SMika Westerberg 	ring = tb_ring_alloc_tx(xd->tb->nhi, -1, TBNET_RING_SIZE,
9280d0950a9SMika Westerberg 				RING_FLAG_FRAME);
9290d0950a9SMika Westerberg 	if (!ring) {
9300d0950a9SMika Westerberg 		netdev_err(dev, "failed to allocate Tx ring\n");
9310d0950a9SMika Westerberg 		return -ENOMEM;
9320d0950a9SMika Westerberg 	}
9330d0950a9SMika Westerberg 	net->tx_ring.ring = ring;
9340d0950a9SMika Westerberg 
9350d0950a9SMika Westerberg 	hopid = tb_xdomain_alloc_out_hopid(xd, -1);
9360d0950a9SMika Westerberg 	if (hopid < 0) {
9370d0950a9SMika Westerberg 		netdev_err(dev, "failed to allocate Tx HopID\n");
9380d0950a9SMika Westerberg 		tb_ring_free(net->tx_ring.ring);
9390d0950a9SMika Westerberg 		net->tx_ring.ring = NULL;
9400d0950a9SMika Westerberg 		return hopid;
9410d0950a9SMika Westerberg 	}
9420d0950a9SMika Westerberg 	net->local_transmit_path = hopid;
9430d0950a9SMika Westerberg 
9440d0950a9SMika Westerberg 	sof_mask = BIT(TBIP_PDF_FRAME_START);
9450d0950a9SMika Westerberg 	eof_mask = BIT(TBIP_PDF_FRAME_END);
9460d0950a9SMika Westerberg 
9470d0950a9SMika Westerberg 	flags = RING_FLAG_FRAME;
9480d0950a9SMika Westerberg 	/* Only enable full E2E if the other end supports it too */
9490d0950a9SMika Westerberg 	if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
9500d0950a9SMika Westerberg 		flags |= RING_FLAG_E2E;
9510d0950a9SMika Westerberg 
9520d0950a9SMika Westerberg 	ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
9530d0950a9SMika Westerberg 				net->tx_ring.ring->hop, sof_mask,
9540d0950a9SMika Westerberg 				eof_mask, tbnet_start_poll, net);
9550d0950a9SMika Westerberg 	if (!ring) {
9560d0950a9SMika Westerberg 		netdev_err(dev, "failed to allocate Rx ring\n");
9570d0950a9SMika Westerberg 		tb_xdomain_release_out_hopid(xd, hopid);
9580d0950a9SMika Westerberg 		tb_ring_free(net->tx_ring.ring);
9590d0950a9SMika Westerberg 		net->tx_ring.ring = NULL;
9600d0950a9SMika Westerberg 		return -ENOMEM;
9610d0950a9SMika Westerberg 	}
9620d0950a9SMika Westerberg 	net->rx_ring.ring = ring;
9630d0950a9SMika Westerberg 
9640d0950a9SMika Westerberg 	napi_enable(&net->napi);
9650d0950a9SMika Westerberg 	start_login(net);
9660d0950a9SMika Westerberg 
9670d0950a9SMika Westerberg 	return 0;
9680d0950a9SMika Westerberg }
9690d0950a9SMika Westerberg 
tbnet_stop(struct net_device * dev)9700d0950a9SMika Westerberg static int tbnet_stop(struct net_device *dev)
9710d0950a9SMika Westerberg {
9720d0950a9SMika Westerberg 	struct tbnet *net = netdev_priv(dev);
9730d0950a9SMika Westerberg 
9740d0950a9SMika Westerberg 	napi_disable(&net->napi);
9750d0950a9SMika Westerberg 
9760d0950a9SMika Westerberg 	cancel_work_sync(&net->disconnect_work);
9770d0950a9SMika Westerberg 	tbnet_tear_down(net, true);
9780d0950a9SMika Westerberg 
9790d0950a9SMika Westerberg 	tb_ring_free(net->rx_ring.ring);
9800d0950a9SMika Westerberg 	net->rx_ring.ring = NULL;
9810d0950a9SMika Westerberg 
9820d0950a9SMika Westerberg 	tb_xdomain_release_out_hopid(net->xd, net->local_transmit_path);
9830d0950a9SMika Westerberg 	tb_ring_free(net->tx_ring.ring);
9840d0950a9SMika Westerberg 	net->tx_ring.ring = NULL;
9850d0950a9SMika Westerberg 
9860d0950a9SMika Westerberg 	return 0;
9870d0950a9SMika Westerberg }
9880d0950a9SMika Westerberg 
static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
	struct tbnet_frame **frames, u32 frame_count)
{
	struct thunderbolt_ip_frame_header *hdr = page_address(frames[0]->page);
	struct device *dma_dev = tb_ring_dma_device(net->tx_ring.ring);
	unsigned int i, len, offset = skb_transport_offset(skb);
	/* Remove payload length from checksum */
	u32 paylen = skb->len - skb_transport_offset(skb);
	__wsum wsum = (__force __wsum)htonl(paylen);
	__be16 protocol = skb->protocol;
	void *data = skb->data;
	void *dest = hdr + 1;
	__sum16 *tucso;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* No need to calculate checksum so we just update the
		 * total frame count and sync the frames for DMA.
		 */
		for (i = 0; i < frame_count; i++) {
			hdr = page_address(frames[i]->page);
			hdr->frame_count = cpu_to_le32(frame_count);
			trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
						hdr->frame_index, hdr->frame_count);
			dma_sync_single_for_device(dma_dev,
				frames[i]->frame.buffer_phy,
				tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
		}

		return true;
	}

	if (protocol == htons(ETH_P_8021Q)) {
		struct vlan_hdr *vhdr, vh;

		vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(vh), &vh);
		if (!vhdr)
			return false;

		protocol = vhdr->h_vlan_encapsulated_proto;
	}

	/* Data points to the beginning of the packet. Compute the
	 * absolute offset of each checksum field within the packet:
	 * ipcso updates the IP header checksum and tucso updates the
	 * TCP/UDP checksum.
	 */
	if (protocol == htons(ETH_P_IP)) {
		__sum16 *ipcso = dest + ((void *)&(ip_hdr(skb)->check) - data);

		*ipcso = 0;
		*ipcso = ip_fast_csum(dest + skb_network_offset(skb),
				      ip_hdr(skb)->ihl);

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tucso = dest + ((void *)&(udp_hdr(skb)->check) - data);
		else
			return false;

		*tucso = ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					    ip_hdr(skb)->daddr, 0,
					    ip_hdr(skb)->protocol, 0);
	} else if (skb_is_gso(skb) && skb_is_gso_v6(skb)) {
		tucso = dest + ((void *)&(tcp_hdr(skb)->check) - data);
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  IPPROTO_TCP, 0);
	} else if (protocol == htons(ETH_P_IPV6)) {
		tucso = dest + skb_checksum_start_offset(skb) + skb->csum_offset;
		*tucso = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
					  &ipv6_hdr(skb)->daddr, 0,
					  ipv6_hdr(skb)->nexthdr, 0);
	} else {
		return false;
	}
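	/*
	 * Note that csum_tcpudp_magic()/csum_ipv6_magic() were called
	 * with a zero length: the payload length was already seeded
	 * into wsum above, so the loop below only needs to add the
	 * payload bytes of each frame before folding the result back
	 * into the checksum field.
	 */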

	/* The first frame carries the headers, the rest of the frames
	 * contain the data. Calculate the checksum over each frame.
	 */
	for (i = 0; i < frame_count; i++) {
		hdr = page_address(frames[i]->page);
		dest = (void *)(hdr + 1) + offset;
		len = le32_to_cpu(hdr->frame_size) - offset;
		wsum = csum_partial(dest, len, wsum);
		hdr->frame_count = cpu_to_le32(frame_count);
		trace_tbnet_tx_ip_frame(hdr->frame_size, hdr->frame_id,
					hdr->frame_index, hdr->frame_count);

		offset = 0;
	}

	*tucso = csum_fold(wsum);

	/* Checksum is finally calculated and we don't touch the memory
	 * anymore, so DMA sync the frames now.
	 */
	for (i = 0; i < frame_count; i++) {
		dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
			tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
	}

	return true;
}

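/*
 * Maps the given skb fragment with kmap_local_page() so its contents
 * can be copied into the Tx frames. The caller is expected to
 * kunmap_local() the returned pointer once the copy is done.
 */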
static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
			     unsigned int *len)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num];

	*len = skb_frag_size(frag);
	return kmap_local_page(skb_frag_page(frag)) + skb_frag_off(frag);
}

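/*
 * Transmit path. The skb (linear data plus paged fragments) is copied
 * into TBNET_MAX_PAYLOAD_SIZE chunks, each prefixed with a
 * ThunderboltIP frame header carrying the frame size, index and ID.
 * For example a 10000 byte packet is split into
 * DIV_ROUND_UP(10000, 4084) = 3 frames of 4084, 4084 and 1832 payload
 * bytes. If the Tx ring does not have enough free buffers for all the
 * frames, the queue is stopped and NETDEV_TX_BUSY is returned so the
 * stack retries later.
 */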
static netdev_tx_t tbnet_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct tbnet *net = netdev_priv(dev);
	struct tbnet_frame *frames[MAX_SKB_FRAGS];
	u16 frame_id = atomic_read(&net->frame_id);
	struct thunderbolt_ip_frame_header *hdr;
	unsigned int len = skb_headlen(skb);
	unsigned int data_len = skb->len;
	unsigned int nframes, i;
	unsigned int frag = 0;
	void *src = skb->data;
	u32 frame_index = 0;
	bool unmap = false;
	void *dest;

	trace_tbnet_tx_skb(skb);

	nframes = DIV_ROUND_UP(data_len, TBNET_MAX_PAYLOAD_SIZE);
	if (tbnet_available_buffers(&net->tx_ring) < nframes) {
		netif_stop_queue(net->dev);
		return NETDEV_TX_BUSY;
	}

	frames[frame_index] = tbnet_get_tx_buffer(net);
	if (!frames[frame_index])
		goto err_drop;

	hdr = page_address(frames[frame_index]->page);
	dest = hdr + 1;

	/* If overall packet is bigger than the frame data size */
	while (data_len > TBNET_MAX_PAYLOAD_SIZE) {
		unsigned int size_left = TBNET_MAX_PAYLOAD_SIZE;

		hdr->frame_size = cpu_to_le32(TBNET_MAX_PAYLOAD_SIZE);
		hdr->frame_index = cpu_to_le16(frame_index);
		hdr->frame_id = cpu_to_le16(frame_id);

		do {
			if (len > size_left) {
				/* Fill the rest of this frame from the
				 * current source buffer, then break out
				 * and move on to the next frame.
				 */
				memcpy(dest, src, size_left);
				len -= size_left;
				dest += size_left;
				src += size_left;
				break;
			}

			memcpy(dest, src, len);
			size_left -= len;
			dest += len;

			if (unmap) {
				kunmap_local(src);
				unmap = false;
			}

			/* Advance to the next fragment if any remain */
			if (frag < skb_shinfo(skb)->nr_frags) {
				/* Map and then unmap quickly */
				src = tbnet_kmap_frag(skb, frag++, &len);
				unmap = true;
			} else if (unlikely(size_left > 0)) {
				goto err_drop;
			}
		} while (size_left > 0);

		data_len -= TBNET_MAX_PAYLOAD_SIZE;
		frame_index++;

		frames[frame_index] = tbnet_get_tx_buffer(net);
		if (!frames[frame_index])
			goto err_drop;

		hdr = page_address(frames[frame_index]->page);
		dest = hdr + 1;
	}

	hdr->frame_size = cpu_to_le32(data_len);
	hdr->frame_index = cpu_to_le16(frame_index);
	hdr->frame_id = cpu_to_le16(frame_id);

	frames[frame_index]->frame.size = data_len + sizeof(*hdr);

	/* In case the remaining data_len is smaller than a frame */
	while (len < data_len) {
		memcpy(dest, src, len);
		data_len -= len;
		dest += len;

		if (unmap) {
			kunmap_local(src);
			unmap = false;
		}

		if (frag < skb_shinfo(skb)->nr_frags) {
			src = tbnet_kmap_frag(skb, frag++, &len);
			unmap = true;
		} else if (unlikely(data_len > 0)) {
			goto err_drop;
		}
	}

	memcpy(dest, src, data_len);

	if (unmap)
		kunmap_local(src);

	if (!tbnet_xmit_csum_and_map(net, skb, frames, frame_index + 1))
		goto err_drop;

	for (i = 0; i < frame_index + 1; i++)
		tb_ring_tx(net->tx_ring.ring, &frames[i]->frame);

	if (net->svc->prtcstns & TBNET_MATCH_FRAGS_ID)
		atomic_inc(&net->frame_id);

	net->stats.tx_packets++;
	net->stats.tx_bytes += skb->len;

	trace_tbnet_consume_skb(skb);
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;

err_drop:
	/* We can re-use the buffers */
	net->tx_ring.cons -= frame_index;

	dev_kfree_skb_any(skb);
	net->stats.tx_errors++;

	return NETDEV_TX_OK;
}

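/*
 * Fills in the 64-bit netdev statistics from the driver's own
 * counters. Note that the individual Rx error counters are also
 * summed up into rx_errors.
 */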
static void tbnet_get_stats64(struct net_device *dev,
			      struct rtnl_link_stats64 *stats)
{
	struct tbnet *net = netdev_priv(dev);

	stats->tx_packets = net->stats.tx_packets;
	stats->rx_packets = net->stats.rx_packets;
	stats->tx_bytes = net->stats.tx_bytes;
	stats->rx_bytes = net->stats.rx_bytes;
	stats->rx_errors = net->stats.rx_errors + net->stats.rx_length_errors +
		net->stats.rx_over_errors + net->stats.rx_crc_errors +
		net->stats.rx_missed_errors;
	stats->tx_errors = net->stats.tx_errors;
	stats->rx_length_errors = net->stats.rx_length_errors;
	stats->rx_over_errors = net->stats.rx_over_errors;
	stats->rx_crc_errors = net->stats.rx_crc_errors;
	stats->rx_missed_errors = net->stats.rx_missed_errors;
}

static const struct net_device_ops tbnet_netdev_ops = {
	.ndo_open = tbnet_open,
	.ndo_stop = tbnet_stop,
	.ndo_start_xmit = tbnet_start_xmit,
	.ndo_get_stats64 = tbnet_get_stats64,
};

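/*
 * Generates a stable, locally administered unicast MAC address for
 * the interface. The physical port number goes into the high nibble
 * of the first byte (e.g. phy_port 1 yields 0x12 = 1 << 4 | 0x02) and
 * the remaining five bytes are derived from a jhash of the local host
 * UUID, so the same host/port pair always gets the same address.
 */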
static void tbnet_generate_mac(struct net_device *dev)
{
	const struct tbnet *net = netdev_priv(dev);
	const struct tb_xdomain *xd = net->xd;
	u8 addr[ETH_ALEN];
	u8 phy_port;
	u32 hash;

	phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route));

	/* Unicast and locally administered MAC */
	addr[0] = phy_port << 4 | 0x02;
	hash = jhash2((u32 *)xd->local_uuid, 4, 0);
	memcpy(addr + 1, &hash, sizeof(hash));
	hash = jhash2((u32 *)xd->local_uuid, 4, hash);
	addr[5] = hash & 0xff;
	eth_hw_addr_set(dev, addr);
}

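/*
 * Called when a matching "network" service appears on an XDomain
 * link. Allocates and registers the Ethernet device, sets up the
 * login/connect work items and announces the offload features the
 * driver can handle.
 */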
static int tbnet_probe(struct tb_service *svc, const struct tb_service_id *id)
{
	struct tb_xdomain *xd = tb_service_parent(svc);
	struct net_device *dev;
	struct tbnet *net;
	int ret;

	dev = alloc_etherdev(sizeof(*net));
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &svc->dev);

	net = netdev_priv(dev);
	INIT_DELAYED_WORK(&net->login_work, tbnet_login_work);
	INIT_WORK(&net->connected_work, tbnet_connected_work);
	INIT_WORK(&net->disconnect_work, tbnet_disconnect_work);
	mutex_init(&net->connection_lock);
	atomic_set(&net->command_id, 0);
	atomic_set(&net->frame_id, 0);
	net->svc = svc;
	net->dev = dev;
	net->xd = xd;

	tbnet_generate_mac(dev);

	strcpy(dev->name, "thunderbolt%d");
	dev->netdev_ops = &tbnet_netdev_ops;

	/* ThunderboltIP takes advantage of TSO packets but instead of
	 * segmenting them we just split the packet into Thunderbolt
	 * frames (maximum payload size of each frame is 4084 bytes) and
	 * calculate checksum over the whole packet here.
	 *
	 * The receiving side does the opposite if the host OS supports
	 * LRO, otherwise it needs to split the large packet into MTU
	 * sized smaller packets.
	 *
	 * In order to receive large packets from the networking stack,
	 * we need to announce support for most of the offloading
	 * features here.
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_ALL_TSO | NETIF_F_GRO |
			   NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA;
	dev->hard_header_len += sizeof(struct thunderbolt_ip_frame_header);

	netif_napi_add(dev, &net->napi, tbnet_poll);

	/* MTU range: 68 - 65522 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = TBNET_MAX_MTU - ETH_HLEN;

	net->handler.uuid = &tbnet_svc_uuid;
	net->handler.callback = tbnet_handle_packet;
	net->handler.data = net;
	tb_register_protocol_handler(&net->handler);

	tb_service_set_drvdata(svc, net);

	ret = register_netdev(dev);
	if (ret) {
		tb_unregister_protocol_handler(&net->handler);
		free_netdev(dev);
		return ret;
	}

	return 0;
}

static void tbnet_remove(struct tb_service *svc)
{
	struct tbnet *net = tb_service_get_drvdata(svc);

	unregister_netdev(net->dev);
	tb_unregister_protocol_handler(&net->handler);
	free_netdev(net->dev);
}

static void tbnet_shutdown(struct tb_service *svc)
{
	tbnet_tear_down(tb_service_get_drvdata(svc), true);
}

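/*
 * System sleep hooks. On suspend the login process is stopped and the
 * connection torn down; on resume the protocol handler is
 * re-registered and, if the interface was running, a new login is
 * started to re-establish the connection with the remote host.
 */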
static int tbnet_suspend(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	stop_login(net);
	if (netif_running(net->dev)) {
		netif_device_detach(net->dev);
		tbnet_tear_down(net, true);
	}

	tb_unregister_protocol_handler(&net->handler);
	return 0;
}

static int tbnet_resume(struct device *dev)
{
	struct tb_service *svc = tb_to_service(dev);
	struct tbnet *net = tb_service_get_drvdata(svc);

	tb_register_protocol_handler(&net->handler);

	netif_carrier_off(net->dev);
	if (netif_running(net->dev)) {
		netif_device_attach(net->dev);
		start_login(net);
	}

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(tbnet_pm_ops, tbnet_suspend, tbnet_resume);

static const struct tb_service_id tbnet_ids[] = {
	{ TB_SERVICE("network", 1) },
	{ },
};
MODULE_DEVICE_TABLE(tbsvc, tbnet_ids);

static struct tb_service_driver tbnet_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "thunderbolt-net",
		.pm = pm_sleep_ptr(&tbnet_pm_ops),
	},
	.probe = tbnet_probe,
	.remove = tbnet_remove,
	.shutdown = tbnet_shutdown,
	.id_table = tbnet_ids,
};

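/*
 * Module init: publishes the "network" XDomain property directory
 * (protocol ID/version/revision and the prtcstns capability flags,
 * including the optional end-to-end flow control bit) so that remote
 * hosts can discover the service, then registers the service driver.
 */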
static int __init tbnet_init(void)
{
	unsigned int flags;
	int ret;

	tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
	if (!tbnet_dir)
		return -ENOMEM;

	tb_property_add_immediate(tbnet_dir, "prtcid", 1);
	tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
	tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);

	flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES;
	if (tbnet_e2e)
		flags |= TBNET_E2E;
	tb_property_add_immediate(tbnet_dir, "prtcstns", flags);

	ret = tb_register_property_dir("network", tbnet_dir);
	if (ret)
		goto err_free_dir;

	ret = tb_register_service_driver(&tbnet_driver);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	tb_unregister_property_dir("network", tbnet_dir);
err_free_dir:
	tb_property_free_dir(tbnet_dir);

	return ret;
}
module_init(tbnet_init);

static void __exit tbnet_exit(void)
{
	tb_unregister_service_driver(&tbnet_driver);
	tb_unregister_property_dir("network", tbnet_dir);
	tb_property_free_dir(tbnet_dir);
}
module_exit(tbnet_exit);

MODULE_AUTHOR("Amir Levy <amir.jer.levy@intel.com>");
MODULE_AUTHOR("Michael Jamet <michael.jamet@intel.com>");
MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
MODULE_DESCRIPTION("Thunderbolt/USB4 network driver");
MODULE_LICENSE("GPL v2");