/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;

struct xdp_umem {
	void *addrs;
	u64 size;
	u32 headroom;
	u32 chunk_size;
	u32 chunks;
	u32 npgs;
	struct user_struct *user;
	refcount_t users;
	u8 flags;
	bool zc;
	struct page **pgs;
	int id;
	struct list_head xsk_dma_list;
};

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	struct xsk_buff_pool *pool;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;

	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};

#ifdef CONFIG_XDP_SOCKETS

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */
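
/*
 * Usage sketch (illustrative comment only, not part of the header proper):
 * the three XSKMAP helpers declared above are meant to be composed on the
 * XDP_REDIRECT fast path.  The wrapper below and its name,
 * xsk_redirect_one(), are hypothetical; only the __xsk_map_*() helpers in
 * this file are real symbols.  Assuming a driver RX/NAPI context, the flow
 * looks roughly like:
 *
 *	static int xsk_redirect_one(struct bpf_map *map, u32 index,
 *				    struct xdp_buff *xdp)
 *	{
 *		// Resolve the destination socket; NULL if the index is out
 *		// of range or the map slot is empty.
 *		struct xdp_sock *xs = __xsk_map_lookup_elem(map, index);
 *
 *		if (!xs)
 *			return -EINVAL;
 *
 *		// Hand the frame to the socket's Rx path; the socket is
 *		// remembered so it can be flushed in a later batch.
 *		return __xsk_map_redirect(xs, xdp);
 *	}
 *
 * Once the poll loop has used up its budget, pending sockets are flushed in
 * one batch so that userspace sees the new Rx descriptors:
 *
 *	__xsk_map_flush();
 */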