#ifndef _NET_FLOW_OFFLOAD_H
#define _NET_FLOW_OFFLOAD_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <net/flow_dissector.h>

struct flow_match {
	struct flow_dissector	*dissector;
	void			*mask;
	void			*key;
};

struct flow_match_meta {
	struct flow_dissector_key_meta *key, *mask;
};

struct flow_match_basic {
	struct flow_dissector_key_basic *key, *mask;
};

struct flow_match_control {
	struct flow_dissector_key_control *key, *mask;
};

struct flow_match_eth_addrs {
	struct flow_dissector_key_eth_addrs *key, *mask;
};

struct flow_match_vlan {
	struct flow_dissector_key_vlan *key, *mask;
};

struct flow_match_ipv4_addrs {
	struct flow_dissector_key_ipv4_addrs *key, *mask;
};

struct flow_match_ipv6_addrs {
	struct flow_dissector_key_ipv6_addrs *key, *mask;
};

struct flow_match_ip {
	struct flow_dissector_key_ip *key, *mask;
};

struct flow_match_ports {
	struct flow_dissector_key_ports *key, *mask;
};

struct flow_match_icmp {
	struct flow_dissector_key_icmp *key, *mask;
};

struct flow_match_tcp {
	struct flow_dissector_key_tcp *key, *mask;
};

struct flow_match_mpls {
	struct flow_dissector_key_mpls *key, *mask;
};

struct flow_match_enc_keyid {
	struct flow_dissector_key_keyid *key, *mask;
};

struct flow_match_enc_opts {
	struct flow_dissector_key_enc_opts *key, *mask;
};

struct flow_rule;

void flow_rule_match_meta(const struct flow_rule *rule,
			  struct flow_match_meta *out);
void flow_rule_match_basic(const struct flow_rule *rule,
			   struct flow_match_basic *out);
void flow_rule_match_control(const struct flow_rule *rule,
			     struct flow_match_control *out);
void flow_rule_match_eth_addrs(const struct flow_rule *rule,
			       struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
			  struct flow_match_vlan *out);
void flow_rule_match_cvlan(const struct flow_rule *rule,
			   struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
				struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
				struct flow_match_ipv6_addrs *out);
void flow_rule_match_ip(const struct flow_rule *rule,
			struct flow_match_ip *out);
void flow_rule_match_ports(const struct flow_rule *rule,
			   struct flow_match_ports *out);
void flow_rule_match_tcp(const struct flow_rule *rule,
			 struct flow_match_tcp *out);
void flow_rule_match_icmp(const struct flow_rule *rule,
			  struct flow_match_icmp *out);
void flow_rule_match_mpls(const struct flow_rule *rule,
			  struct flow_match_mpls *out);
void flow_rule_match_enc_control(const struct flow_rule *rule,
				 struct flow_match_control *out);
void flow_rule_match_enc_ipv4_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv4_addrs *out);
void flow_rule_match_enc_ipv6_addrs(const struct flow_rule *rule,
				    struct flow_match_ipv6_addrs *out);
void flow_rule_match_enc_ip(const struct flow_rule *rule,
			    struct flow_match_ip *out);
void flow_rule_match_enc_ports(const struct flow_rule *rule,
			       struct flow_match_ports *out);
void flow_rule_match_enc_keyid(const struct flow_rule *rule,
			       struct flow_match_enc_keyid *out);
void flow_rule_match_enc_opts(const struct flow_rule *rule,
			      struct flow_match_enc_opts *out);
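
/*
 * Example (illustrative sketch, not part of this API): pulling one of the
 * key/mask pairs above out of a rule. It assumes the caller has already
 * checked, via flow_rule_match_key() further down in this header, that the
 * key is present. example_hw_rule and example_hw_set_ipv4() are
 * hypothetical driver-side names.
 */
struct example_hw_rule;					/* hypothetical */
int example_hw_set_ipv4(struct example_hw_rule *hw,
			__be32 src, __be32 src_mask,
			__be32 dst, __be32 dst_mask);	/* hypothetical */

static inline int example_parse_ipv4(const struct flow_rule *rule,
				     struct example_hw_rule *hw)
{
	struct flow_match_ipv4_addrs match;

	/* ->key holds the values to match on, ->mask the bits that matter */
	flow_rule_match_ipv4_addrs(rule, &match);
	if (!match.mask->src && !match.mask->dst)
		return 0;	/* nothing to program for this key */

	return example_hw_set_ipv4(hw, match.key->src, match.mask->src,
				   match.key->dst, match.mask->dst);
}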

enum flow_action_id {
	FLOW_ACTION_ACCEPT		= 0,
	FLOW_ACTION_DROP,
	FLOW_ACTION_TRAP,
	FLOW_ACTION_GOTO,
	FLOW_ACTION_REDIRECT,
	FLOW_ACTION_MIRRED,
	FLOW_ACTION_VLAN_PUSH,
	FLOW_ACTION_VLAN_POP,
	FLOW_ACTION_VLAN_MANGLE,
	FLOW_ACTION_TUNNEL_ENCAP,
	FLOW_ACTION_TUNNEL_DECAP,
	FLOW_ACTION_MANGLE,
	FLOW_ACTION_ADD,
	FLOW_ACTION_CSUM,
	FLOW_ACTION_MARK,
	FLOW_ACTION_WAKE,
	FLOW_ACTION_QUEUE,
	FLOW_ACTION_SAMPLE,
	FLOW_ACTION_POLICE,
	FLOW_ACTION_CT,
};

/* This mirrors the enum pedit_header_type definition for easy mapping from
 * the tc pedit action. The legacy TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK is mapped
 * to FLOW_ACT_MANGLE_UNSPEC, which no driver supports.
 */
enum flow_action_mangle_base {
	FLOW_ACT_MANGLE_UNSPEC		= 0,
	FLOW_ACT_MANGLE_HDR_TYPE_ETH,
	FLOW_ACT_MANGLE_HDR_TYPE_IP4,
	FLOW_ACT_MANGLE_HDR_TYPE_IP6,
	FLOW_ACT_MANGLE_HDR_TYPE_TCP,
	FLOW_ACT_MANGLE_HDR_TYPE_UDP,
};

struct flow_action_entry {
	enum flow_action_id		id;
	union {
		u32			chain_index;	/* FLOW_ACTION_GOTO */
		struct net_device	*dev;		/* FLOW_ACTION_REDIRECT */
		struct {				/* FLOW_ACTION_VLAN */
			u16		vid;
			__be16		proto;
			u8		prio;
		} vlan;
		struct {				/* FLOW_ACTION_PACKET_EDIT */
			enum flow_action_mangle_base htype;
			u32		offset;
			u32		mask;
			u32		val;
		} mangle;
		const struct ip_tunnel_info *tunnel;	/* FLOW_ACTION_TUNNEL_ENCAP */
		u32			csum_flags;	/* FLOW_ACTION_CSUM */
		u32			mark;		/* FLOW_ACTION_MARK */
		struct {				/* FLOW_ACTION_QUEUE */
			u32		ctx;
			u32		index;
			u8		vf;
		} queue;
		struct {				/* FLOW_ACTION_SAMPLE */
			struct psample_group	*psample_group;
			u32			rate;
			u32			trunc_size;
			bool			truncate;
		} sample;
		struct {				/* FLOW_ACTION_POLICE */
			s64			burst;
			u64			rate_bytes_ps;
		} police;
		struct {				/* FLOW_ACTION_CT */
			int			action;
			u16			zone;
		} ct;
	};
};

struct flow_action {
	unsigned int			num_entries;
	struct flow_action_entry	entries[0];
};

static inline bool flow_action_has_entries(const struct flow_action *action)
{
	return action->num_entries;
}
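
/*
 * Example (illustrative sketch, not part of this API): translating one
 * flow_action_entry into hardware state. The example_hw_*() helpers are
 * hypothetical driver functions, declared here only so the sketch is
 * self-contained; errno codes are assumed visible to the including file.
 */
int example_hw_drop(void *priv);					/* hypothetical */
int example_hw_push_vlan(void *priv, __be16 proto, u16 vid, u8 prio);	/* hypothetical */
int example_hw_pedit(void *priv, enum flow_action_mangle_base htype,
		     u32 offset, u32 mask, u32 val);			/* hypothetical */

static inline int example_parse_one_action(void *priv,
					   const struct flow_action_entry *act)
{
	switch (act->id) {
	case FLOW_ACTION_DROP:
		return example_hw_drop(priv);
	case FLOW_ACTION_VLAN_PUSH:
		return example_hw_push_vlan(priv, act->vlan.proto,
					    act->vlan.vid, act->vlan.prio);
	case FLOW_ACTION_MANGLE:
		/* 32-bit masked write of ->val at ->offset within ->htype */
		return example_hw_pedit(priv, act->mangle.htype,
					act->mangle.offset, act->mangle.mask,
					act->mangle.val);
	default:
		return -EOPNOTSUPP;	/* anything else is not offloaded here */
	}
}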

/**
 * flow_offload_has_one_action() - check if exactly one action is present
 * @action: tc filter flow offload action
 *
 * Returns true if exactly one action is present.
 */
static inline bool flow_offload_has_one_action(const struct flow_action *action)
{
	return action->num_entries == 1;
}

#define flow_action_for_each(__i, __act, __actions)			\
	for (__i = 0, __act = &(__actions)->entries[0];			\
	     __i < (__actions)->num_entries;				\
	     __act = &(__actions)->entries[++__i])

struct flow_rule {
	struct flow_match	match;
	struct flow_action	action;
};

struct flow_rule *flow_rule_alloc(unsigned int num_actions);

static inline bool flow_rule_match_key(const struct flow_rule *rule,
				       enum flow_dissector_key_id key)
{
	return dissector_uses_key(rule->match.dissector, key);
}

struct flow_stats {
	u64	pkts;
	u64	bytes;
	u64	lastused;
};

static inline void flow_stats_update(struct flow_stats *flow_stats,
				     u64 bytes, u64 pkts, u64 lastused)
{
	flow_stats->pkts	+= pkts;
	flow_stats->bytes	+= bytes;
	flow_stats->lastused	= max_t(u64, flow_stats->lastused, lastused);
}

enum flow_block_command {
	FLOW_BLOCK_BIND,
	FLOW_BLOCK_UNBIND,
};

enum flow_block_binder_type {
	FLOW_BLOCK_BINDER_TYPE_UNSPEC,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS,
	FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS,
};

struct flow_block {
	struct list_head cb_list;
};

struct netlink_ext_ack;

struct flow_block_offload {
	enum flow_block_command command;
	enum flow_block_binder_type binder_type;
	bool block_shared;
	struct net *net;
	struct flow_block *block;
	struct list_head cb_list;
	struct list_head *driver_block_list;
	struct netlink_ext_ack *extack;
};

enum tc_setup_type;
typedef int flow_setup_cb_t(enum tc_setup_type type, void *type_data,
			    void *cb_priv);

struct flow_block_cb {
	struct list_head	driver_list;
	struct list_head	list;
	flow_setup_cb_t		*cb;
	void			*cb_ident;
	void			*cb_priv;
	void			(*release)(void *cb_priv);
	unsigned int		refcnt;
};

struct flow_block_cb *flow_block_cb_alloc(flow_setup_cb_t *cb,
					  void *cb_ident, void *cb_priv,
					  void (*release)(void *cb_priv));
void flow_block_cb_free(struct flow_block_cb *block_cb);

struct flow_block_cb *flow_block_cb_lookup(struct flow_block *block,
					   flow_setup_cb_t *cb, void *cb_ident);

void *flow_block_cb_priv(struct flow_block_cb *block_cb);
void flow_block_cb_incref(struct flow_block_cb *block_cb);
unsigned int flow_block_cb_decref(struct flow_block_cb *block_cb);

static inline void flow_block_cb_add(struct flow_block_cb *block_cb,
				     struct flow_block_offload *offload)
{
	list_add_tail(&block_cb->list, &offload->cb_list);
}

static inline void flow_block_cb_remove(struct flow_block_cb *block_cb,
					struct flow_block_offload *offload)
{
	list_move(&block_cb->list, &offload->cb_list);
}

bool flow_block_cb_is_busy(flow_setup_cb_t *cb, void *cb_ident,
			   struct list_head *driver_block_list);

int flow_block_cb_setup_simple(struct flow_block_offload *f,
			       struct list_head *driver_list,
			       flow_setup_cb_t *cb,
			       void *cb_ident, void *cb_priv, bool ingress_only);
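
/*
 * Example (illustrative sketch, not part of this API): the bind/unbind
 * pattern a driver typically implements on top of the flow_block_cb
 * helpers above; flow_block_cb_setup_simple() packages similar logic.
 * example_setup_block() is a hypothetical driver function, and IS_ERR()
 * plus the errno codes are assumed visible through the driver's includes.
 */
static inline int example_setup_block(struct flow_block_offload *f,
				      flow_setup_cb_t *cb, void *priv,
				      struct list_head *driver_block_list)
{
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = driver_block_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		/* refuse to bind the same callback/identity pair twice */
		if (flow_block_cb_is_busy(cb, priv, driver_block_list))
			return -EBUSY;

		block_cb = flow_block_cb_alloc(cb, priv, priv, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, driver_block_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, priv);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}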

enum flow_cls_command {
	FLOW_CLS_REPLACE,
	FLOW_CLS_DESTROY,
	FLOW_CLS_STATS,
	FLOW_CLS_TMPLT_CREATE,
	FLOW_CLS_TMPLT_DESTROY,
};

struct flow_cls_common_offload {
	u32 chain_index;
	__be16 protocol;
	u32 prio;
	struct netlink_ext_ack *extack;
};

struct flow_cls_offload {
	struct flow_cls_common_offload common;
	enum flow_cls_command command;
	unsigned long cookie;
	struct flow_rule *rule;
	struct flow_stats stats;
	u32 classid;
};

static inline struct flow_rule *
flow_cls_offload_flow_rule(struct flow_cls_offload *flow_cmd)
{
	return flow_cmd->rule;
}

static inline void flow_block_init(struct flow_block *flow_block)
{
	INIT_LIST_HEAD(&flow_block->cb_list);
}

#endif /* _NET_FLOW_OFFLOAD_H */
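
/*
 * Example (illustrative sketch, shown only as a comment): the shape of the
 * flow_setup_cb_t a driver registers via flow_block_cb_alloc(), dispatching
 * on flow_cls_command. All example_* names are hypothetical; only the
 * flow_cls_*() accessors and flow_stats_update() above are real.
 *
 *	static int example_setup_tc_block_cb(enum tc_setup_type type,
 *					     void *type_data, void *cb_priv)
 *	{
 *		struct flow_cls_offload *f = type_data;
 *
 *		if (type != TC_SETUP_CLSFLOWER)
 *			return -EOPNOTSUPP;
 *
 *		switch (f->command) {
 *		case FLOW_CLS_REPLACE:
 *			return example_add_rule(cb_priv, f->cookie,
 *						flow_cls_offload_flow_rule(f));
 *		case FLOW_CLS_DESTROY:
 *			return example_del_rule(cb_priv, f->cookie);
 *		case FLOW_CLS_STATS:
 *			// read hardware counters, then report them back with
 *			// flow_stats_update(&f->stats, bytes, pkts, lastused);
 *			return example_get_stats(cb_priv, f->cookie, &f->stats);
 *		default:
 *			return -EOPNOTSUPP;
 *		}
 *	}
 */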