Lines matching whole-word "imu" (struct io_mapped_ubuf) in the io_uring buffer-registration code:
137 struct io_mapped_ubuf *imu = *slot;
140 if (imu != &dummy_ubuf) {
141 for (i = 0; i < imu->nr_bvecs; i++)
142 unpin_user_page(imu->bvec[i].bv_page);
143 if (imu->acct_pages)
144 io_unaccount_mem(ctx, imu->acct_pages);
145 kvfree(imu);
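The matches at 137-145 form the buffer teardown path: every previously pinned page is released with unpin_user_page(), any pages charged against the memlock limit are returned via io_unaccount_mem(), and the struct itself is kvfree()d. A minimal reconstruction of the surrounding function, assuming the io_buffer_unmap name from the match at 462 and that the slot is cleared afterwards:

    static void io_buffer_unmap(struct io_ring_ctx *ctx,
    			    struct io_mapped_ubuf **slot)
    {
    	struct io_mapped_ubuf *imu = *slot;
    	unsigned int i;

    	/* dummy_ubuf is a sentinel for sparse slots; nothing to release */
    	if (imu != &dummy_ubuf) {
    		/* drop the pin taken on each page at registration time */
    		for (i = 0; i < imu->nr_bvecs; i++)
    			unpin_user_page(imu->bvec[i].bv_page);
    		/* return pages charged against the memlock limit */
    		if (imu->acct_pages)
    			io_unaccount_mem(ctx, imu->acct_pages);
    		kvfree(imu);
    	}
    	*slot = NULL;	/* assumed: slot is reset after teardown */
    }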
436 struct io_mapped_ubuf *imu;
453 err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
462 io_buffer_unmap(ctx, &imu);
468 ctx->user_bufs[i] = imu;
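The matches at 436-468 are the register/update loop: each user iovec is converted into an io_mapped_ubuf and, on success, published into ctx->user_bufs[]; if a later step fails, the freshly built mapping is unwound again with io_buffer_unmap(). A plausible shape of that loop, with the loop bounds, the iovec copy-in, and the intermediate failure point all assumed (the two helpers named below are hypothetical placeholders):

    for (i = 0; i < nr_args; i++) {
    	struct io_mapped_ubuf *imu;

    	err = copy_iov_from_user(&iov, arg, i);	/* hypothetical helper */
    	if (err)
    		break;
    	err = io_sqe_buffer_register(ctx, &iov, &imu, &last_hpage);
    	if (err)
    		break;
    	err = retire_old_buffer(ctx, i);	/* hypothetical step */
    	if (err) {
    		/* undo the new mapping before bailing out */
    		io_buffer_unmap(ctx, &imu);
    		break;
    	}
    	ctx->user_bufs[i] = imu;
    }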
831 struct io_mapped_ubuf *imu = ctx->user_bufs[i];
833 for (j = 0; j < imu->nr_bvecs; j++) {
834 if (!PageCompound(imu->bvec[j].bv_page))
836 if (compound_head(imu->bvec[j].bv_page) == hpage)
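The matches at 831-836 scan every already-registered buffer and compare the compound head of each bvec page against a candidate huge page, i.e. they answer "has this huge page already been charged?". A sketch of that check as a standalone helper; the name and exact signature are assumptions, and the kernel version may also scan the pages of the buffer currently being registered:

    static bool headpage_already_acct(struct io_ring_ctx *ctx,
    				  struct page *hpage)
    {
    	unsigned int i, j;

    	for (i = 0; i < ctx->nr_user_bufs; i++) {
    		struct io_mapped_ubuf *imu = ctx->user_bufs[i];

    		for (j = 0; j < imu->nr_bvecs; j++) {
    			/* only compound (huge) pages can share a head */
    			if (!PageCompound(imu->bvec[j].bv_page))
    				continue;
    			if (compound_head(imu->bvec[j].bv_page) == hpage)
    				return true;
    		}
    	}
    	return false;
    }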
845 int nr_pages, struct io_mapped_ubuf *imu,
850 imu->acct_pages = 0;
853 imu->acct_pages++;
863 imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
867 if (!imu->acct_pages)
870 ret = io_account_mem(ctx, imu->acct_pages);
872 imu->acct_pages = 0;
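The matches at 845-872 are the pin-accounting step: normal pages are charged one by one, while a huge page is charged exactly once, at its full size in base pages, no matter how many of its tail pages appear in the mapping; the total is then charged with io_account_mem() and zeroed again if that charge fails. A sketch under those assumptions, reusing the dedup helper named above:

    static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
    				 int nr_pages, struct io_mapped_ubuf *imu,
    				 struct page **last_hpage)
    {
    	int i, ret;

    	imu->acct_pages = 0;
    	for (i = 0; i < nr_pages; i++) {
    		if (!PageCompound(pages[i])) {
    			imu->acct_pages++;	/* base page: charge one */
    		} else {
    			struct page *hpage = compound_head(pages[i]);

    			if (hpage == *last_hpage)
    				continue;	/* same huge page as before */
    			*last_hpage = hpage;
    			if (headpage_already_acct(ctx, hpage))
    				continue;	/* charged by an earlier buffer */
    			/* charge the whole huge page exactly once */
    			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
    		}
    	}

    	if (!imu->acct_pages)
    		return 0;

    	ret = io_account_mem(ctx, imu->acct_pages);
    	if (ret)
    		imu->acct_pages = 0;	/* nothing was actually charged */
    	return ret;
    }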
880 struct io_mapped_ubuf *imu = NULL;
926 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
927 if (!imu)
930 ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
939 imu->ubuf = (unsigned long) iov->iov_base;
940 imu->ubuf_end = imu->ubuf + iov->iov_len;
941 imu->nr_bvecs = nr_pages;
942 *pimu = imu;
946 bvec_set_page(&imu->bvec[0], pages[0], size, off);
953 bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
959 kvfree(imu);
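The matches at 880-959 are the core of buffer registration: the io_mapped_ubuf is kvmalloc()ed with struct_size() so its flexible bvec[] array holds one entry per pinned page, the pin is accounted, the user address range is recorded, and each page is described with bvec_set_page(). The single bvec_set_page() call at 946 is the huge-page case, where the whole pinned range coalesces into one segment. A sketch of the per-page path; the pinning step (e.g. via pin_user_pages()) and the coalescing test are elided, and the error label is assumed:

    	struct io_mapped_ubuf *imu = NULL;
    	unsigned long off;
    	size_t size;
    	int ret, i;

    	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
    	if (!imu)
    		goto done;		/* assumed error label */

    	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
    	if (ret) {
    		kvfree(imu);		/* cf. the kvfree at 959 */
    		goto done;
    	}

    	imu->ubuf = (unsigned long) iov->iov_base;
    	imu->ubuf_end = imu->ubuf + iov->iov_len;
    	imu->nr_bvecs = nr_pages;
    	*pimu = imu;

    	off = imu->ubuf & ~PAGE_MASK;	/* offset into the first page */
    	size = iov->iov_len;
    	for (i = 0; i < nr_pages; i++) {
    		size_t vec_len = min_t(size_t, size, PAGE_SIZE - off);

    		bvec_set_page(&imu->bvec[i], pages[i], vec_len, off);
    		off = 0;		/* later pages start at offset 0 */
    		size -= vec_len;
    	}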
1025 struct io_mapped_ubuf *imu,
1031 if (WARN_ON_ONCE(!imu))
1036 if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
1043 offset = buf_addr - imu->ubuf;
1044 iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
1063 const struct bio_vec *bvec = imu->bvec;
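The matches at 1025-1063 are the I/O-time import path for a fixed buffer: after validating that the requested [buf_addr, buf_addr + len) range lies entirely inside the registered [ubuf, ubuf_end) range, an iov_iter of type bvec is built directly over the pre-pinned pages, so no page lookup or copy-in is needed per request; the bvec pointer at 1063 suggests a subsequent manual walk of the segments. A sketch, with the overflow check on buf_addr + len assumed:

    static int io_import_fixed(int ddir, struct iov_iter *iter,
    			   struct io_mapped_ubuf *imu,
    			   u64 buf_addr, size_t len)
    {
    	u64 buf_end;
    	size_t offset;

    	if (WARN_ON_ONCE(!imu))
    		return -EFAULT;
    	if (unlikely(check_add_overflow(buf_addr, (u64)len, &buf_end)))
    		return -EFAULT;
    	/* the request must sit entirely inside the registered range */
    	if (unlikely(buf_addr < imu->ubuf || buf_end > imu->ubuf_end))
    		return -EFAULT;

    	/* iterate over the pre-pinned pages, then skip to the request's
    	 * offset within the registered buffer */
    	offset = buf_addr - imu->ubuf;
    	iov_iter_bvec(iter, ddir, imu->bvec, imu->nr_bvecs, offset + len);
    	if (offset)
    		iov_iter_advance(iter, offset);	/* assumed; a manual seek
    						 * over imu->bvec (cf. 1063)
    						 * would be cheaper */
    	return 0;
    }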