/******************************************************************************
 * netif.h
 *
 * Unified network-device I/O interface for Xen guest OSes.
 *
 * Copyright (c) 2003-2004, Keir Fraser
 */

#ifndef __XEN_PUBLIC_IO_NETIF_H__
#define __XEN_PUBLIC_IO_NETIF_H__

#include <xen/interface/io/ring.h>
#include <xen/interface/grant_table.h>

/*
 * Older implementations of the Xen network frontend / backend have an
 * implicit dependency on MAX_SKB_FRAGS as the maximum number of ring
 * slots an skb can use. Netfront / netback may not work as expected
 * when frontend and backend have different values of MAX_SKB_FRAGS.
 *
 * A better approach is to add a mechanism for netfront / netback to
 * negotiate this value. However, we cannot fix all possible frontends,
 * so we need to define a value which states the minimum number of
 * slots a backend must support.
 *
 * The minimum value derives from the older Linux kernel's MAX_SKB_FRAGS
 * (18), which has been proven to work with most frontends. Any new
 * backend which doesn't negotiate with its frontend should expect the
 * frontend to send a valid packet using slots up to this value.
 */
#define XEN_NETIF_NR_SLOTS_MIN 18

/*
 * Notifications after enqueuing any type of message should be conditional on
 * the appropriate req_event or rsp_event field in the shared ring.
 * If the client sends notifications for rx requests then it should specify
 * feature 'feature-rx-notify' via xenbus. Otherwise the backend will assume
 * that it cannot safely queue packets (as it may not be kicked to send them).
 */

/*
 * "feature-split-event-channels" is introduced to separate guest TX
 * and RX notification. The backend either doesn't support this feature,
 * or advertises it via xenstore as 0 (disabled) or 1 (enabled).
 *
 * To make use of this feature, the frontend should allocate two event
 * channels, one for TX and one for RX, and advertise them to the backend
 * as "event-channel-tx" and "event-channel-rx" respectively. If the
 * frontend doesn't want to use this feature, it just writes the
 * "event-channel" node as before.
 */

/*
 * "feature-no-csum-offload" should be used to turn IPv4 TCP/UDP checksum
 * offload off or on. If it is missing then the feature is assumed to be on.
 * "feature-ipv6-csum-offload" should be used to turn IPv6 TCP/UDP checksum
 * offload on or off. If it is missing then the feature is assumed to be off.
 */

/*
 * "feature-gso-tcpv4" and "feature-gso-tcpv6" advertise the capability to
 * handle large TCP packets (in IPv4 or IPv6 form respectively). Neither
 * frontends nor backends are assumed to be capable unless the flags are
 * present.
 */

/*
 * This is the 'wire' format for packets:
 *  Request 1: xen_netif_tx_request  -- XEN_NETTXF_* (any flags)
 * [Request 2: xen_netif_extra_info]  (only if request 1 has XEN_NETTXF_extra_info)
 * [Request 3: xen_netif_extra_info]  (only if request 2 has XEN_NETIF_EXTRA_MORE)
 *  Request 4: xen_netif_tx_request  -- XEN_NETTXF_more_data
 *  Request 5: xen_netif_tx_request  -- XEN_NETTXF_more_data
 *  ...
 *  Request N: xen_netif_tx_request  -- 0
 *
 * (An illustrative, non-normative sketch of composing such a chain follows
 * the structure definitions below.)
 */

/* Protocol checksum field is blank in the packet (hardware offload)? */
#define _XEN_NETTXF_csum_blank		(0)
#define XEN_NETTXF_csum_blank		(1U<<_XEN_NETTXF_csum_blank)

/* Packet data has been validated against protocol checksum. */
#define _XEN_NETTXF_data_validated	(1)
#define XEN_NETTXF_data_validated	(1U<<_XEN_NETTXF_data_validated)

/* Packet continues in the next request descriptor. */
#define _XEN_NETTXF_more_data		(2)
#define XEN_NETTXF_more_data		(1U<<_XEN_NETTXF_more_data)

/* Packet to be followed by extra descriptor(s). */
#define _XEN_NETTXF_extra_info		(3)
#define XEN_NETTXF_extra_info		(1U<<_XEN_NETTXF_extra_info)

#define XEN_NETIF_MAX_TX_SIZE 0xFFFF
struct xen_netif_tx_request {
	grant_ref_t gref;	/* Reference to buffer page */
	uint16_t offset;	/* Offset within buffer page */
	uint16_t flags;		/* XEN_NETTXF_* */
	uint16_t id;		/* Echoed in response message. */
	uint16_t size;		/* Packet size in bytes. */
};

/* Types of xen_netif_extra_info descriptors. */
#define XEN_NETIF_EXTRA_TYPE_NONE	(0)  /* Never used - invalid */
#define XEN_NETIF_EXTRA_TYPE_GSO	(1)  /* u.gso */
#define XEN_NETIF_EXTRA_TYPE_MAX	(2)

/* xen_netif_extra_info flags. */
#define _XEN_NETIF_EXTRA_FLAG_MORE	(0)
#define XEN_NETIF_EXTRA_FLAG_MORE	(1U<<_XEN_NETIF_EXTRA_FLAG_MORE)

/* GSO types */
#define XEN_NETIF_GSO_TYPE_NONE		(0)
#define XEN_NETIF_GSO_TYPE_TCPV4	(1)
#define XEN_NETIF_GSO_TYPE_TCPV6	(2)

/*
 * This structure needs to fit within both xen_netif_tx_request and
 * xen_netif_rx_response for compatibility.
 */
struct xen_netif_extra_info {
	uint8_t type;	/* XEN_NETIF_EXTRA_TYPE_* */
	uint8_t flags;	/* XEN_NETIF_EXTRA_FLAG_* */

	union {
		struct {
			/*
			 * Maximum payload size of each segment. For
			 * example, for TCP this is just the path MSS.
			 */
			uint16_t size;

			/*
			 * GSO type. This determines the protocol of
			 * the packet and any extra features required
			 * to segment the packet properly.
			 */
			uint8_t type;	/* XEN_NETIF_GSO_TYPE_* */

			/* Future expansion. */
			uint8_t pad;

			/*
			 * GSO features. This specifies any extra GSO
			 * features required to process this packet,
			 * such as ECN support for TCPv4.
			 */
			uint16_t features;	/* XEN_NETIF_GSO_FEAT_* */
		} gso;

		uint16_t pad[3];
	} u;
};
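/*
 * Illustrative sketch only, not part of this interface: one way a frontend
 * might fill the slots for a two-fragment TCPv4 GSO packet, following the
 * wire format described above. "example_queue_gso_packet" is a hypothetical
 * helper; the caller is assumed to have reserved three consecutive free
 * slots on the shared TX ring (first, extra, second) and to have already
 * granted the two data pages to the backend.
 */
static inline void
example_queue_gso_packet(struct xen_netif_tx_request *first,
			 struct xen_netif_extra_info *extra,
			 struct xen_netif_tx_request *second,
			 grant_ref_t gref0, uint16_t len0,
			 grant_ref_t gref1, uint16_t len1,
			 uint16_t mss)
{
	/* Request 1: carries the *total* packet size and the chain flags. */
	first->gref   = gref0;
	first->offset = 0;
	first->flags  = XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated |
			XEN_NETTXF_more_data | XEN_NETTXF_extra_info;
	first->id     = 0;			/* echoed back in the response */
	first->size   = len0 + len1;

	/* Request 2: GSO extra descriptor; flags = 0 as no further extras follow. */
	extra->type           = XEN_NETIF_EXTRA_TYPE_GSO;
	extra->flags          = 0;
	extra->u.gso.size     = mss;
	extra->u.gso.type     = XEN_NETIF_GSO_TYPE_TCPV4;
	extra->u.gso.pad      = 0;
	extra->u.gso.features = 0;

	/* Request 3: final fragment; size is this slot's length, flags 0 ends the chain. */
	second->gref   = gref1;
	second->offset = 0;
	second->flags  = 0;
	second->id     = 1;
	second->size   = len1;
}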
struct xen_netif_tx_response {
	uint16_t id;
	int16_t status;		/* XEN_NETIF_RSP_* */
};

struct xen_netif_rx_request {
	uint16_t id;		/* Echoed in response message. */
	grant_ref_t gref;	/* Reference to incoming granted frame */
};

/* Packet data has been validated against protocol checksum. */
#define _XEN_NETRXF_data_validated	(0)
#define XEN_NETRXF_data_validated	(1U<<_XEN_NETRXF_data_validated)

/* Protocol checksum field is blank in the packet (hardware offload)? */
#define _XEN_NETRXF_csum_blank		(1)
#define XEN_NETRXF_csum_blank		(1U<<_XEN_NETRXF_csum_blank)

/* Packet continues in the next request descriptor. */
#define _XEN_NETRXF_more_data		(2)
#define XEN_NETRXF_more_data		(1U<<_XEN_NETRXF_more_data)

/* Packet to be followed by extra descriptor(s). */
#define _XEN_NETRXF_extra_info		(3)
#define XEN_NETRXF_extra_info		(1U<<_XEN_NETRXF_extra_info)

/* GSO Prefix descriptor. */
#define _XEN_NETRXF_gso_prefix		(4)
#define XEN_NETRXF_gso_prefix		(1U<<_XEN_NETRXF_gso_prefix)

struct xen_netif_rx_response {
	uint16_t id;
	uint16_t offset;	/* Offset in page of start of received packet */
	uint16_t flags;		/* XEN_NETRXF_* */
	int16_t status;		/* -ve: XEN_NETIF_RSP_* ; +ve: Rx'ed pkt size. */
};

/*
 * Generate netif ring structures and types.
 */

DEFINE_RING_TYPES(xen_netif_tx,
		  struct xen_netif_tx_request,
		  struct xen_netif_tx_response);
DEFINE_RING_TYPES(xen_netif_rx,
		  struct xen_netif_rx_request,
		  struct xen_netif_rx_response);

#define XEN_NETIF_RSP_DROPPED	-2
#define XEN_NETIF_RSP_ERROR	-1
#define XEN_NETIF_RSP_OKAY	 0
/* No response: used for auxiliary requests (e.g., xen_netif_extra_info). */
#define XEN_NETIF_RSP_NULL	 1
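/*
 * Illustrative sketch only, not part of this interface: a minimal loop a
 * frontend might use to consume TX responses and map their status codes.
 * "example_drain_tx_responses" and the "release" callback are hypothetical;
 * a real driver (e.g. Linux netfront) additionally takes the relevant locks
 * and re-checks for late responses with RING_FINAL_CHECK_FOR_RESPONSES.
 */
static inline void
example_drain_tx_responses(struct xen_netif_tx_front_ring *tx,
			   void (*release)(uint16_t id, int ok))
{
	while (RING_HAS_UNCONSUMED_RESPONSES(tx)) {
		struct xen_netif_tx_response *rsp =
			RING_GET_RESPONSE(tx, tx->rsp_cons);

		/*
		 * XEN_NETIF_RSP_NULL marks a slot that carried an
		 * xen_netif_extra_info descriptor rather than packet data;
		 * there is nothing to release for it.
		 */
		if (rsp->status != XEN_NETIF_RSP_NULL)
			release(rsp->id, rsp->status == XEN_NETIF_RSP_OKAY);

		tx->rsp_cons++;
	}
}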
#endif