#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <net/flow_dissector.h>

struct flow_match {
	struct flow_dissector	*dissector;
	void			*mask;
	void			*key;
};

struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_rule;

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out);
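/*
 * Illustrative sketch, not part of this API: a driver's classifier offload
 * path would typically test for a key with flow_rule_match_key() (defined
 * further below) and then fill the corresponding flow_match_* structure.
 * foo_parse_ports() and foo_hw_set_ports() are hypothetical driver helpers.
 *
 *	static int foo_parse_ports(const struct flow_rule *rule)
 *	{
 *		struct flow_match_ports ports;
 *
 *		if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS))
 *			return 0;
 *
 *		flow_rule_match_ports(rule, &ports);
 *		// ports.key and ports.mask now point into the rule's match data
 *		return foo_hw_set_ports(ports.key->dst, ports.mask->dst);
 *	}
 */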
enum flow_action_id {
	FLOW_ACTION_ACCEPT		= 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
	FLOW_ACTION_MPLS_PUSH,
	FLOW_ACTION_MPLS_POP,
	FLOW_ACTION_MPLS_MANGLE,
};

/* This mirrors the enum pedit_header_type definition to ease mapping from the
 * tc pedit action. The legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK maps to
 * FLOW_ACT_MANGLE_UNSPEC, which no driver supports.
 */
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC		= 0,
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};

struct flow_action_entry {
	enum flow_action_id		id;
	union {
		u32			chain_index;	/* FLOW_ACTION_GOTO */
		struct net_device	*dev;		/* FLOW_ACTION_REDIRECT */
		struct {				/* FLOW_ACTION_VLAN */
			u16		vid;
			__be16		proto;
			u8		prio;
		} vlan;
		struct {				/* FLOW_ACTION_PACKET_EDIT */
			enum flow_action_mangle_base htype;
			u32		offset;
			u32		mask;
			u32		val;
		} mangle;
		const struct ip_tunnel_info *tunnel;	/* FLOW_ACTION_TUNNEL_ENCAP */
		u32			csum_flags;	/* FLOW_ACTION_CSUM */
		u32			mark;		/* FLOW_ACTION_MARK */
		struct {				/* FLOW_ACTION_QUEUE */
			u32		ctx;
			u32		index;
			u8		vf;
		} queue;
		struct {				/* FLOW_ACTION_SAMPLE */
			struct psample_group	*psample_group;
			u32			rate;
			u32			trunc_size;
			bool			truncate;
		} sample;
		struct {				/* FLOW_ACTION_POLICE */
			s64			burst;
			u64			rate_bytes_ps;
		} police;
		struct {				/* FLOW_ACTION_CT */
			int			action;
			u16			zone;
		} ct;
		struct {				/* FLOW_ACTION_MPLS_PUSH */
			u32		label;
			__be16		proto;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_push;
		struct {				/* FLOW_ACTION_MPLS_POP */
			__be16		proto;
		} mpls_pop;
		struct {				/* FLOW_ACTION_MPLS_MANGLE */
			u32		label;
			u8		tc;
			u8		bos;
			u8		ttl;
		} mpls_mangle;
	};
};
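/*
 * Illustrative sketch, not part of this API: when translating one action
 * entry into hardware state, a driver switches on entry->id and reads only
 * the union member selected by that id. The foo_hw_*() helpers are
 * hypothetical.
 *
 *	static int foo_setup_action(const struct flow_action_entry *entry)
 *	{
 *		switch (entry->id) {
 *		case FLOW_ACTION_DROP:
 *			return foo_hw_drop();
 *		case FLOW_ACTION_VLAN_PUSH:
 *			return foo_hw_vlan_push(entry->vlan.proto,
 *						entry->vlan.vid,
 *						entry->vlan.prio);
 *		case FLOW_ACTION_MARK:
 *			return foo_hw_set_mark(entry->mark);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */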
struct flow_action {
	unsigned int			num_entries;
	struct flow_action_entry	entries[0];
};

static inline bool flow_action_has_entries(const struct flow_action *action)
{
	return action->num_entries;
}

/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
	return action->num_entries == 1;
}

#define flow_action_for_each(__i, __act, __actions)			\
	for (__i = 0, __act = &(__actions)->entries[0];			\
	     __i < (__actions)->num_entries;				\
	     __act = &(__actions)->entries[++__i])

struct flow_rule {
	struct flow_match	match;
	struct flow_action	action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline bool flow_rule_match_key(const struct flow_rule *rule,
				       enum flow_dissector_key_id key)
{
	return dissector_uses_key(rule->match.dissector, key);
}

struct flow_stats {
	u64	pkts;
	u64	bytes;
	u64	lastused;
};

static inline void flow_stats_update(struct flow_stats *flow_stats,
				     u64 bytes, u64 pkts, u64 lastused)
{
	flow_stats->pkts	+= pkts;
	flow_stats->bytes	+= bytes;
	flow_stats->lastused	= max_t(u64, flow_stats->lastused, lastused);
}

enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct flow_block {
	struct list_head cb_list;
};

struct netlink_ext_ack;

struct flow_block_offload {
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	bool block_shared;
	struct net *net;
	struct flow_block *block;
	struct list_head cb_list;
	struct list_head *driver_block_list;
	struct netlink_ext_ack *extack;
};

enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

struct flow_block_cb {
	struct list_head	driver_list;
	struct list_head	list;
	flow_setup_cb_t		*cb;
	void			*cb_ident;
	void			*cb_priv;
	void			(*release)(void *cb_priv);
	unsigned int		refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
				     struct flow_block_offload *offload)
{
	list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
					struct flow_block_offload *offload)
{
	list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv, bool ingress_only);
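/*
 * Illustrative sketch, not part of this API: a driver that cannot use
 * flow_block_cb_setup_simple() typically handles FLOW_BLOCK_BIND by
 * allocating a callback, checking for duplicates and adding it to the
 * offload's cb_list. foo_block_cb(), struct foo_priv and foo_block_list are
 * hypothetical driver symbols; FLOW_BLOCK_UNBIND handling is omitted here.
 *
 *	static int foo_setup_block(struct foo_priv *priv,
 *				   struct flow_block_offload *f)
 *	{
 *		struct flow_block_cb *block_cb;
 *
 *		if (f->command != FLOW_BLOCK_BIND)
 *			return -EOPNOTSUPP;
 *
 *		if (flow_block_cb_is_busy(foo_block_cb, priv, &foo_block_list))
 *			return -EBUSY;
 *
 *		block_cb = flow_block_cb_alloc(foo_block_cb, priv, priv, NULL);
 *		if (IS_ERR(block_cb))
 *			return PTR_ERR(block_cb);
 *
 *		flow_block_cb_add(block_cb, f);
 *		list_add_tail(&block_cb->driver_list, &foo_block_list);
 *		return 0;
 *	}
 */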
enum flow_cls_command {
	FLOW_CLS_REPLACE,
	FLOW_CLS_DESTROY,
	FLOW_CLS_STATS,
	FLOW_CLS_TMPLT_CREATE,
	FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
	struct flow_cls_common_offload common;
	enum flow_cls_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
	return flow_cmd->rule;
}

static inline void flow_block_init(struct flow_block *flow_block)
{
	INIT_LIST_HEAD(&flow_block->cb_list);
}

#endif /* _NET_FLOW_OFFLOAD_H */
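/*
 * Illustrative sketch, not part of this API: the callback registered through
 * flow_block_cb_alloc() receives a struct flow_cls_offload for classifier
 * offload and dispatches on cls->command. foo_add_rule(), foo_del_rule(),
 * foo_query_stats() and struct foo_priv are hypothetical driver symbols.
 *
 *	static int foo_block_cb(enum tc_setup_type type, void *type_data,
 *				void *cb_priv)
 *	{
 *		struct flow_cls_offload *cls = type_data;
 *		struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
 *		struct foo_priv *priv = cb_priv;
 *
 *		if (type != TC_SETUP_CLSFLOWER)
 *			return -EOPNOTSUPP;
 *
 *		switch (cls->command) {
 *		case FLOW_CLS_REPLACE:
 *			return foo_add_rule(priv, cls->cookie, rule);
 *		case FLOW_CLS_DESTROY:
 *			return foo_del_rule(priv, cls->cookie);
 *		case FLOW_CLS_STATS:
 *			return foo_query_stats(priv, cls->cookie, &cls->stats);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */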