// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "io_uring.h"
#include "sqpoll.h"
#include "fdinfo.h"
#include "cancel.h"
#include "rsrc.h"

#ifdef CONFIG_PROC_FS
static __cold int io_uring_show_cred(struct seq_file *m, unsigned int id,
		const struct cred *cred)
{
	struct user_namespace *uns = seq_user_ns(m);
	struct group_info *gi;
	kernel_cap_t cap;
	unsigned __capi;
	int g;

	seq_printf(m, "%5d\n", id);
	seq_put_decimal_ull(m, "\tUid:\t", from_kuid_munged(uns, cred->uid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->euid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->suid));
	seq_put_decimal_ull(m, "\t\t", from_kuid_munged(uns, cred->fsuid));
	seq_put_decimal_ull(m, "\n\tGid:\t", from_kgid_munged(uns, cred->gid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->egid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->sgid));
	seq_put_decimal_ull(m, "\t\t", from_kgid_munged(uns, cred->fsgid));
	seq_puts(m, "\n\tGroups:\t");
	gi = cred->group_info;
	for (g = 0; g < gi->ngroups; g++) {
		seq_put_decimal_ull(m, g ? " " : "",
				    from_kgid_munged(uns, gi->gid[g]));
	}
	seq_puts(m, "\n\tCapEff:\t");
	cap = cred->cap_effective;
	CAP_FOR_EACH_U32(__capi)
		seq_put_hex_ll(m, NULL, cap.cap[CAP_LAST_U32 - __capi], 8);
	seq_putc(m, '\n');
	return 0;
}
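
/*
 * Best-effort dump of the ring state: SQ/CQ geometry and head/tail
 * positions, pending SQEs/CQEs, the SQPOLL thread (if any), registered
 * files/buffers/personalities, pending poll requests and overflowed CQEs.
 */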
static __cold void __io_uring_show_fdinfo(struct io_ring_ctx *ctx,
					  struct seq_file *m)
{
	struct io_sq_data *sq = NULL;
	struct io_overflow_cqe *ocqe;
	struct io_rings *r = ctx->rings;
	unsigned int sq_mask = ctx->sq_entries - 1, cq_mask = ctx->cq_entries - 1;
	unsigned int sq_head = READ_ONCE(r->sq.head);
	unsigned int sq_tail = READ_ONCE(r->sq.tail);
	unsigned int cq_head = READ_ONCE(r->cq.head);
	unsigned int cq_tail = READ_ONCE(r->cq.tail);
	unsigned int cq_shift = 0;
	unsigned int sq_entries, cq_entries;
	bool has_lock;
	bool is_cqe32 = (ctx->flags & IORING_SETUP_CQE32);
	unsigned int i;

	/* In CQE32 mode, each CQE occupies two slots in the CQ ring. */
	if (is_cqe32)
		cq_shift = 1;

	/*
	 * We may get imprecise sqe and cqe info if the ring is actively
	 * running, since we read cached_sq_head and cached_cq_tail without
	 * holding uring_lock, and sq_tail and cq_head are changed by
	 * userspace. That's OK, as this info is normally only consulted
	 * once the ring is already stuck.
	 */
	seq_printf(m, "SqMask:\t0x%x\n", sq_mask);
	seq_printf(m, "SqHead:\t%u\n", sq_head);
	seq_printf(m, "SqTail:\t%u\n", sq_tail);
	seq_printf(m, "CachedSqHead:\t%u\n", ctx->cached_sq_head);
	seq_printf(m, "CqMask:\t0x%x\n", cq_mask);
	seq_printf(m, "CqHead:\t%u\n", cq_head);
	seq_printf(m, "CqTail:\t%u\n", cq_tail);
	seq_printf(m, "CachedCqTail:\t%u\n", ctx->cached_cq_tail);
	seq_printf(m, "SQEs:\t%u\n", sq_tail - ctx->cached_sq_head);
	sq_entries = min(sq_tail - sq_head, ctx->sq_entries);
	for (i = 0; i < sq_entries; i++) {
		unsigned int entry = i + sq_head;
		unsigned int sq_idx = READ_ONCE(ctx->sq_array[entry & sq_mask]);
		struct io_uring_sqe *sqe;

		/* sq_array is written by userspace, don't trust the index */
		if (sq_idx > sq_mask)
			continue;
		sqe = &ctx->sq_sqes[sq_idx];
		seq_printf(m, "%5u: opcode:%d, fd:%d, flags:%x, user_data:%llu\n",
			   sq_idx, sqe->opcode, sqe->fd, sqe->flags,
			   sqe->user_data);
	}
	seq_printf(m, "CQEs:\t%u\n", cq_tail - cq_head);
	cq_entries = min(cq_tail - cq_head, ctx->cq_entries);
	for (i = 0; i < cq_entries; i++) {
		unsigned int entry = i + cq_head;
		struct io_uring_cqe *cqe = &r->cqes[(entry & cq_mask) << cq_shift];

		if (!is_cqe32) {
			seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x\n",
				   entry & cq_mask, cqe->user_data, cqe->res,
				   cqe->flags);
		} else {
			seq_printf(m, "%5u: user_data:%llu, res:%d, flag:%x, "
				   "extra1:%llu, extra2:%llu\n",
				   entry & cq_mask, cqe->user_data, cqe->res,
				   cqe->flags, cqe->big_cqe[0], cqe->big_cqe[1]);
		}
	}

	/*
	 * Avoid an ABBA deadlock between the seq lock and the io_uring mutex,
	 * since the fdinfo case grabs them in the opposite order of normal
	 * use cases. If we fail to get the lock, we just don't iterate any
	 * structures that could be going away outside the io_uring mutex.
	 */
	has_lock = mutex_trylock(&ctx->uring_lock);

	if (has_lock && (ctx->flags & IORING_SETUP_SQPOLL)) {
		sq = ctx->sq_data;
		if (!sq->thread)
			sq = NULL;
	}

	seq_printf(m, "SqThread:\t%d\n", sq ? task_pid_nr(sq->thread) : -1);
	seq_printf(m, "SqThreadCpu:\t%d\n", sq ? task_cpu(sq->thread) : -1);
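	/*
	 * The file/buffer tables and personalities below are only walked
	 * when uring_lock was acquired above; without the lock, only the
	 * raw counts are printed.
	 */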
	seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files);
	for (i = 0; has_lock && i < ctx->nr_user_files; i++) {
		struct file *f = io_file_from_index(&ctx->file_table, i);

		if (f)
			seq_printf(m, "%5u: %s\n", i, file_dentry(f)->d_iname);
		else
			seq_printf(m, "%5u: <none>\n", i);
	}
	seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs);
	for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) {
		struct io_mapped_ubuf *buf = ctx->user_bufs[i];
		unsigned int len = buf->ubuf_end - buf->ubuf;

		seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, len);
	}
	if (has_lock && !xa_empty(&ctx->personalities)) {
		unsigned long index;
		const struct cred *cred;

		seq_printf(m, "Personalities:\n");
		xa_for_each(&ctx->personalities, index, cred)
			io_uring_show_cred(m, index, cred);
	}
	if (has_lock)
		mutex_unlock(&ctx->uring_lock);

	seq_puts(m, "PollList:\n");
	for (i = 0; i < (1U << ctx->cancel_table.hash_bits); i++) {
		struct io_hash_bucket *hb = &ctx->cancel_table.hbs[i];
		struct io_kiocb *req;

		spin_lock(&hb->lock);
		hlist_for_each_entry(req, &hb->list, hash_node)
			seq_printf(m, "  op=%d, task_works=%d\n", req->opcode,
				   task_work_pending(req->task));
		spin_unlock(&hb->lock);
	}

	seq_puts(m, "CqOverflowList:\n");
	spin_lock(&ctx->completion_lock);
	list_for_each_entry(ocqe, &ctx->cq_overflow_list, list) {
		struct io_uring_cqe *cqe = &ocqe->cqe;

		seq_printf(m, "  user_data=%llu, res=%d, flags=%x\n",
			   cqe->user_data, cqe->res, cqe->flags);
	}
	spin_unlock(&ctx->completion_lock);
}

__cold void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
{
	struct io_ring_ctx *ctx = f->private_data;

	/* Don't dump anything if the ring is already going away. */
	if (percpu_ref_tryget(&ctx->refs)) {
		__io_uring_show_fdinfo(ctx, m);
		percpu_ref_put(&ctx->refs);
	}
}
#endif
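
/*
 * io_uring_show_fdinfo() above is installed as the ->show_fdinfo handler in
 * io_uring's file_operations, so all of the state dumped by this file can be
 * read from userspace via /proc/<pid>/fdinfo/<ring fd>, which is mainly
 * useful for inspecting a ring that appears stuck.
 */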