/* SPDX-License-Identifier: GPL-2.0 */
/* AF_XDP internal functions
 * Copyright(c) 2018 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_H
#define _LINUX_XDP_SOCK_H

#include <linux/workqueue.h>
#include <linux/if_xdp.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <net/sock.h>

struct net_device;
struct xsk_queue;
struct xdp_buff;

struct xdp_umem {
	void *addrs;
	u64 size;
	u32 headroom;
	u32 chunk_size;
	u32 chunks;
	u32 npgs;
	struct user_struct *user;
	refcount_t users;
	u8 flags;
	bool zc;
	struct page **pgs;
	int id;
	struct list_head xsk_dma_list;
	struct work_struct work;
};

struct xsk_map {
	struct bpf_map map;
	spinlock_t lock; /* Synchronize map updates */
	struct xdp_sock *xsk_map[];
};

struct xdp_sock {
	/* struct sock must be the first member of struct xdp_sock */
	struct sock sk;
	struct xsk_queue *rx ____cacheline_aligned_in_smp;
	struct net_device *dev;
	struct xdp_umem *umem;
	struct list_head flush_node;
	struct xsk_buff_pool *pool;
	u16 queue_id;
	bool zc;
	enum {
		XSK_READY = 0,
		XSK_BOUND,
		XSK_UNBOUND,
	} state;

	struct xsk_queue *tx ____cacheline_aligned_in_smp;
	struct list_head tx_list;
	/* Mutual exclusion of NAPI TX thread and sendmsg error paths
	 * in the SKB destructor callback.
	 */
	spinlock_t tx_completion_lock;
	/* Protects generic receive. */
	spinlock_t rx_lock;

	/* Statistics */
	u64 rx_dropped;
	u64 rx_queue_full;

	struct list_head map_list;
	/* Protects map_list */
	spinlock_t map_list_lock;
	/* Protects multiple processes in the control path */
	struct mutex mutex;
	struct xsk_queue *fq_tmp; /* Only as tmp storage before bind */
	struct xsk_queue *cq_tmp; /* Only as tmp storage before bind */
};

#ifdef CONFIG_XDP_SOCKETS

int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
void __xsk_map_flush(void);

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	struct xsk_map *m = container_of(map, struct xsk_map, map);
	struct xdp_sock *xs;

	if (key >= map->max_entries)
		return NULL;

	xs = READ_ONCE(m->xsk_map[key]);
	return xs;
}

#else

static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -ENOTSUPP;
}

static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
{
	return -EOPNOTSUPP;
}

static inline void __xsk_map_flush(void)
{
}

static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
						     u32 key)
{
	return NULL;
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_H */