// SPDX-License-Identifier: GPL-2.0
#ifndef IOU_RSRC_H
#define IOU_RSRC_H

#include <net/af_unix.h>

#define IO_RSRC_TAG_TABLE_SHIFT	(PAGE_SHIFT - 3)
#define IO_RSRC_TAG_TABLE_MAX	(1U << IO_RSRC_TAG_TABLE_SHIFT)
#define IO_RSRC_TAG_TABLE_MASK	(IO_RSRC_TAG_TABLE_MAX - 1)

enum {
	IORING_RSRC_FILE	= 0,
	IORING_RSRC_BUFFER	= 1,
};

struct io_rsrc_put {
	struct list_head list;
	u64 tag;
	union {
		void *rsrc;
		struct file *file;
		struct io_mapped_ubuf *buf;
	};
};

typedef void (rsrc_put_fn)(struct io_ring_ctx *ctx, struct io_rsrc_put *prsrc);

struct io_rsrc_data {
	struct io_ring_ctx	*ctx;

	u64			**tags;
	unsigned int		nr;
	rsrc_put_fn		*do_put;
	atomic_t		refs;
	struct completion	done;
	bool			quiesce;
};

struct io_rsrc_node {
	struct percpu_ref	refs;
	struct list_head	node;
	struct list_head	rsrc_list;
	struct io_rsrc_data	*rsrc_data;
	struct llist_node	llist;
	bool			done;
};

struct io_mapped_ubuf {
	u64		ubuf;
	u64		ubuf_end;
	unsigned int	nr_bvecs;
	unsigned long	acct_pages;
	struct bio_vec	bvec[];
};

void io_rsrc_put_work(struct work_struct *work);
void io_rsrc_refs_refill(struct io_ring_ctx *ctx);
void io_wait_rsrc_data(struct io_rsrc_data *data);
void io_rsrc_node_destroy(struct io_rsrc_node *ref_node);
void io_rsrc_refs_drop(struct io_ring_ctx *ctx);
int io_rsrc_node_switch_start(struct io_ring_ctx *ctx);
int io_queue_rsrc_removal(struct io_rsrc_data *data, unsigned idx,
			  struct io_rsrc_node *node, void *rsrc);
void io_rsrc_node_switch(struct io_ring_ctx *ctx,
			 struct io_rsrc_data *data_to_kill);

int io_import_fixed(int ddir, struct iov_iter *iter,
		    struct io_mapped_ubuf *imu,
		    u64 buf_addr, size_t len);

void __io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_unregister(struct io_ring_ctx *ctx);
int io_sqe_buffers_register(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned int nr_args, u64 __user *tags);
void __io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_unregister(struct io_ring_ctx *ctx);
int io_sqe_files_register(struct io_ring_ctx *ctx, void __user *arg,
			  unsigned nr_args, u64 __user *tags);

int __io_scm_file_account(struct io_ring_ctx *ctx, struct file *file);

#if defined(CONFIG_UNIX)
static inline bool io_file_need_scm(struct file *filp)
{
#if defined(IO_URING_SCM_ALL)
	return true;
#else
	return !!unix_get_socket(filp);
#endif
}
#else
static inline bool io_file_need_scm(struct file *filp)
{
	return false;
}
#endif

static inline int io_scm_file_account(struct io_ring_ctx *ctx,
				      struct file *file)
{
	if (likely(!io_file_need_scm(file)))
		return 0;

	return __io_scm_file_account(ctx, file);
}

int io_register_files_update(struct io_ring_ctx *ctx, void __user *arg,
			     unsigned nr_args);
int io_register_rsrc_update(struct io_ring_ctx *ctx, void __user *arg,
			    unsigned size, unsigned type);
int io_register_rsrc(struct io_ring_ctx *ctx, void __user *arg,
		     unsigned int size, unsigned int type);

static inline void io_rsrc_put_node(struct io_rsrc_node *node, int nr)
{
	percpu_ref_put_many(&node->refs, nr);
}

static inline void io_req_put_rsrc(struct io_kiocb *req)
{
	if (req->rsrc_node)
		io_rsrc_put_node(req->rsrc_node, 1);
}

static inline void io_req_put_rsrc_locked(struct io_kiocb *req,
					  struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_rsrc_node *node = req->rsrc_node;

	if (node) {
		if (node == ctx->rsrc_node)
			ctx->rsrc_cached_refs++;
		else
			io_rsrc_put_node(node, 1);
	}
}

static inline void io_charge_rsrc_node(struct io_ring_ctx *ctx)
{
	ctx->rsrc_cached_refs--;
	if (unlikely(ctx->rsrc_cached_refs < 0))
		io_rsrc_refs_refill(ctx);
}

static inline void io_req_set_rsrc_node(struct io_kiocb *req,
					struct io_ring_ctx *ctx,
					unsigned int issue_flags)
{
	if (!req->rsrc_node) {
		req->rsrc_node = ctx->rsrc_node;

		if (!(issue_flags & IO_URING_F_UNLOCKED)) {
			lockdep_assert_held(&ctx->uring_lock);

			io_charge_rsrc_node(ctx);
		} else {
			percpu_ref_get(&req->rsrc_node->refs);
		}
	}
}

static inline u64 *io_get_tag_slot(struct io_rsrc_data *data, unsigned int idx)
{
	unsigned int off = idx & IO_RSRC_TAG_TABLE_MASK;
	unsigned int table_idx = idx >> IO_RSRC_TAG_TABLE_SHIFT;

	return &data->tags[table_idx][off];
}

int io_rsrc_update(struct io_kiocb *req, unsigned int issue_flags);
int io_rsrc_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe);

int __io_account_mem(struct user_struct *user, unsigned long nr_pages);

static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

#endif
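
/*
 * Illustrative sketch, not part of the upstream header: how the two-level
 * tag table above is indexed.  IO_RSRC_TAG_TABLE_SHIFT is PAGE_SHIFT - 3,
 * so each page-sized table holds PAGE_SIZE / sizeof(u64) tags (512 with 4K
 * pages).  io_get_tag_slot() splits a resource index into a table index
 * (high bits) and an offset within that table (low bits).  Assuming
 * PAGE_SHIFT == 12 purely for the example:
 *
 *	idx       = 1500
 *	table_idx = idx >> 9  = 2
 *	off       = idx & 511 = 476
 *	slot      = &data->tags[2][476]
 *
 * A similar hedged reading of the refcount helpers above: with uring_lock
 * held, io_req_set_rsrc_node() charges the request against the cached batch
 * (io_charge_rsrc_node() decrements ctx->rsrc_cached_refs and calls
 * io_rsrc_refs_refill() when it goes negative), while the unlocked path
 * takes a real percpu reference.  On completion, io_req_put_rsrc_locked()
 * returns a cached reference if the node is still current, otherwise it
 * drops a percpu reference via io_rsrc_put_node().
 */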