Lines matching refs:pthr (identifier cross-reference, drivers/ntb/test/ntb_perf.c)

779 	struct perf_thread *pthr = data;  in perf_dma_copy_callback()  local
781 atomic_dec(&pthr->dma_sync); in perf_dma_copy_callback()
782 wake_up(&pthr->dma_wait); in perf_dma_copy_callback()
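
The three lines above are the DMA completion callback: each finished descriptor decrements the per-thread in-flight counter and wakes the waiter in perf_sync_test(). A minimal sketch of that pattern, assuming only the two fields visible in the fragments (the real struct perf_thread carries more state; perf_thread_min is an illustrative stand-in):

    #include <linux/atomic.h>
    #include <linux/wait.h>

    /* Minimal per-thread state assumed for this sketch. */
    struct perf_thread_min {
            atomic_t dma_sync;              /* in-flight DMA descriptors */
            wait_queue_head_t dma_wait;     /* woken on each completion */
    };

    static void perf_dma_copy_callback(void *data)
    {
            struct perf_thread_min *pthr = data;

            atomic_dec(&pthr->dma_sync);    /* one less outstanding copy */
            wake_up(&pthr->dma_wait);       /* let the waiter re-check */
    }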
785 static int perf_copy_chunk(struct perf_thread *pthr, in perf_copy_chunk() argument
792 struct perf_peer *peer = pthr->perf->test_peer; in perf_copy_chunk()
802 dma_dev = pthr->dma_chan->device->dev; in perf_copy_chunk()
804 if (!is_dma_copy_aligned(pthr->dma_chan->device, offset_in_page(src), in perf_copy_chunk()
826 tx = dmaengine_prep_dma_memcpy(pthr->dma_chan, dst_dma_addr, in perf_copy_chunk()
838 tx->callback_param = pthr; in perf_copy_chunk()
849 atomic_inc(&pthr->dma_sync); in perf_copy_chunk()
850 dma_async_issue_pending(pthr->dma_chan); in perf_copy_chunk()
853 return likely(atomic_read(&pthr->perf->tsync) > 0) ? 0 : -EINTR; in perf_copy_chunk()
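
perf_copy_chunk() pushes one chunk through the dmaengine memcpy path: check the engine's alignment constraints, prep a descriptor wired to the callback above, count it in dma_sync, then submit and kick the channel; the closing line bails out with -EINTR once the test-wide perf->tsync counter signals termination. A hedged sketch of the submit path, reusing the minimal state from the callback sketch and assuming src and dst are already DMA-mapped (the real function also handles a CPU memcpy_toio() fallback, unmap bookkeeping, and retries):

    #include <linux/dmaengine.h>

    static int perf_copy_chunk_sketch(struct perf_thread_min *pthr,
                                      struct dma_chan *chan,
                                      dma_addr_t dst, dma_addr_t src,
                                      size_t len)
    {
            struct dma_async_tx_descriptor *tx;

            tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!tx)
                    return -ENOMEM;

            tx->callback = perf_dma_copy_callback;
            tx->callback_param = pthr;      /* handed back to the callback */

            if (dma_submit_error(dmaengine_submit(tx)))
                    return -EIO;

            atomic_inc(&pthr->dma_sync);    /* counted before issuing */
            dma_async_issue_pending(chan);  /* start (or batch) the transfer */
            return 0;
    }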
871 static int perf_init_test(struct perf_thread *pthr) in perf_init_test() argument
873 struct perf_ctx *perf = pthr->perf; in perf_init_test()
875 struct perf_peer *peer = pthr->perf->test_peer; in perf_init_test()
877 pthr->src = kmalloc_node(perf->test_peer->outbuf_size, GFP_KERNEL, in perf_init_test()
879 if (!pthr->src) in perf_init_test()
882 get_random_bytes(pthr->src, perf->test_peer->outbuf_size); in perf_init_test()
889 pthr->dma_chan = dma_request_channel(dma_mask, perf_dma_filter, perf); in perf_init_test()
890 if (!pthr->dma_chan) { in perf_init_test()
892 pthr->tidx); in perf_init_test()
896 dma_map_resource(pthr->dma_chan->device->dev, in perf_init_test()
899 if (dma_mapping_error(pthr->dma_chan->device->dev, in perf_init_test()
901 dev_err(pthr->dma_chan->device->dev, "%d: Failed to map DMA addr\n", in perf_init_test()
902 pthr->tidx); in perf_init_test()
904 dma_release_channel(pthr->dma_chan); in perf_init_test()
907 dev_dbg(pthr->dma_chan->device->dev, "%d: Map MMIO %pa to DMA addr %pad\n", in perf_init_test()
908 pthr->tidx, in perf_init_test()
912 atomic_set(&pthr->dma_sync, 0); in perf_init_test()
918 kfree(pthr->src); in perf_init_test()
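
perf_init_test() sets up one thread's resources: a NUMA-local source buffer filled with random bytes, a DMA_MEMCPY-capable channel, and a dma_map_resource() mapping of the peer's MMIO window so the engine can write it directly. A sketch of the request-and-map sequence; outbuf_phys and outbuf_size are illustrative stand-ins for the peer window description, and the NULL filter accepts any memcpy channel where the real code passes perf_dma_filter to restrict the choice:

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>

    static struct dma_chan *request_and_map(phys_addr_t outbuf_phys,
                                            size_t outbuf_size,
                                            dma_addr_t *dst_addr)
    {
            dma_cap_mask_t dma_mask;
            struct dma_chan *chan;

            dma_cap_zero(dma_mask);
            dma_cap_set(DMA_MEMCPY, dma_mask);

            chan = dma_request_channel(dma_mask, NULL, NULL);
            if (!chan)
                    return NULL;

            /* The window is device MMIO, not page-backed RAM, hence
             * dma_map_resource() rather than dma_map_single(). */
            *dst_addr = dma_map_resource(chan->device->dev, outbuf_phys,
                                         outbuf_size, DMA_FROM_DEVICE, 0);
            if (dma_mapping_error(chan->device->dev, *dst_addr)) {
                    dma_release_channel(chan);
                    return NULL;
            }
            return chan;
    }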
922 static int perf_run_test(struct perf_thread *pthr) in perf_run_test() argument
924 struct perf_peer *peer = pthr->perf->test_peer; in perf_run_test()
925 struct perf_ctx *perf = pthr->perf; in perf_run_test()
935 flt_src = pthr->src; in perf_run_test()
939 pthr->duration = ktime_get(); in perf_run_test()
942 while (pthr->copied < total_size) { in perf_run_test()
943 ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size); in perf_run_test()
946 pthr->tidx, ret); in perf_run_test()
950 pthr->copied += chunk_size; in perf_run_test()
956 flt_src = pthr->src; in perf_run_test()
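
perf_run_test() stamps the start time and then streams chunks until total_size bytes are copied, wrapping back to the start of the window when the destination runs off the end (visible in the flt_src reset at line 956). A condensed reconstruction of the loop, assuming chunk_size evenly divides the window and using the variable names from the fragments above:

    pthr->duration = ktime_get();           /* closed in perf_sync_test() */

    while (pthr->copied < total_size) {
            ret = perf_copy_chunk(pthr, flt_dst, flt_src, chunk_size);
            if (ret)
                    return ret;

            pthr->copied += chunk_size;
            flt_dst += chunk_size;
            flt_src += chunk_size;

            if (flt_dst >= peer->outbuf + peer->outbuf_size) {
                    flt_dst = peer->outbuf; /* wrap the peer window */
                    flt_src = pthr->src;    /* and the source buffer */
            }
    }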
966 static int perf_sync_test(struct perf_thread *pthr) in perf_sync_test() argument
968 struct perf_ctx *perf = pthr->perf; in perf_sync_test()
973 wait_event(pthr->dma_wait, in perf_sync_test()
974 (atomic_read(&pthr->dma_sync) == 0 || in perf_sync_test()
981 pthr->duration = ktime_sub(ktime_get(), pthr->duration); in perf_sync_test()
984 pthr->tidx, pthr->copied); in perf_sync_test()
987 pthr->tidx, ktime_to_us(pthr->duration)); in perf_sync_test()
989 dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx, in perf_sync_test()
990 div64_u64(pthr->copied, ktime_to_us(pthr->duration))); in perf_sync_test()
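
perf_sync_test() drains the pipeline: wait until dma_sync drops to zero (every callback has fired) or perf->tsync goes negative (test aborted), then close the timestamp opened in perf_run_test(). The MBytes/s figure is simply bytes divided by microseconds, since 1 byte/usec equals 10^6 bytes/s, i.e. 1 MB/s. Sketch, assuming the tsync abort convention visible at line 974:

    wait_event(pthr->dma_wait,
               (atomic_read(&pthr->dma_sync) == 0 ||
                atomic_read(&perf->tsync) < 0));

    if (atomic_read(&perf->tsync) < 0)
            return -EINTR;

    pthr->duration = ktime_sub(ktime_get(), pthr->duration);

    /* bytes / usecs == MBytes/s; e.g. 2 GiB in 1 s -> ~2147 MBytes/s */
    dev_dbg(&perf->ntb->dev, "%d: %llu MBytes/s\n", pthr->tidx,
            div64_u64(pthr->copied, ktime_to_us(pthr->duration)));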
995 static void perf_clear_test(struct perf_thread *pthr) in perf_clear_test() argument
997 struct perf_ctx *perf = pthr->perf; in perf_clear_test()
1006 (void)dmaengine_terminate_sync(pthr->dma_chan); in perf_clear_test()
1007 if (pthr->perf->test_peer->dma_dst_addr) in perf_clear_test()
1008 dma_unmap_resource(pthr->dma_chan->device->dev, in perf_clear_test()
1009 pthr->perf->test_peer->dma_dst_addr, in perf_clear_test()
1010 pthr->perf->test_peer->outbuf_size, in perf_clear_test()
1013 dma_release_channel(pthr->dma_chan); in perf_clear_test()
1018 kfree(pthr->src); in perf_clear_test()
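
perf_clear_test() unwinds the init in reverse order: synchronously terminate anything still queued on the channel, unmap the peer window, release the channel, free the source buffer. Terminating before unmapping matters; otherwise an in-flight descriptor could still target the just-unmapped address. A sketch of that ordering (DMA_FROM_DEVICE mirrors the mapping direction assumed in the init sketch):

    (void)dmaengine_terminate_sync(pthr->dma_chan); /* quiesce the channel */

    if (peer->dma_dst_addr)
            dma_unmap_resource(pthr->dma_chan->device->dev,
                               peer->dma_dst_addr, peer->outbuf_size,
                               DMA_FROM_DEVICE, 0);

    dma_release_channel(pthr->dma_chan);
    kfree(pthr->src);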
1023 struct perf_thread *pthr = to_thread_work(work); in perf_thread_work() local
1033 ret = perf_init_test(pthr); in perf_thread_work()
1035 pthr->status = ret; in perf_thread_work()
1039 ret = perf_run_test(pthr); in perf_thread_work()
1041 pthr->status = ret; in perf_thread_work()
1045 pthr->status = perf_sync_test(pthr); in perf_thread_work()
1048 perf_clear_test(pthr); in perf_thread_work()
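
perf_thread_work() is the workqueue entry point chaining the phases: init, run, sync, clear. A failing phase is latched into pthr->status, and cleanup still runs unless init itself failed. Skeleton, where to_thread_work() is taken to be a container_of() wrapper mapping the work_struct back to its struct perf_thread:

    static void perf_thread_work(struct work_struct *work)
    {
            struct perf_thread *pthr = to_thread_work(work);
            int ret;

            ret = perf_init_test(pthr);
            if (ret) {
                    pthr->status = ret;
                    return;                 /* nothing allocated to clear */
            }

            ret = perf_run_test(pthr);
            if (ret) {
                    pthr->status = ret;
                    goto err_clear_test;
            }

            pthr->status = perf_sync_test(pthr);

    err_clear_test:
            perf_clear_test(pthr);
    }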
1082 struct perf_thread *pthr; in perf_submit_test() local
1096 pthr = &perf->threads[tidx]; in perf_submit_test()
1098 pthr->status = -ENODATA; in perf_submit_test()
1099 pthr->copied = 0; in perf_submit_test()
1100 pthr->duration = ktime_set(0, 0); in perf_submit_test()
1102 (void)queue_work(perf_wq, &pthr->work); in perf_submit_test()
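
perf_submit_test() resets each thread's result fields and queues its work item on the module's perf_wq workqueue; -ENODATA doubles as the sentinel perf_read_stats() later uses to skip threads that never produced a result. Sketch of the per-thread reset, where tcnt (the selected thread count) is assumed from context:

    for (tidx = 0; tidx < tcnt; tidx++) {
            pthr = &perf->threads[tidx];

            pthr->status = -ENODATA;        /* sentinel: no result yet */
            pthr->copied = 0;
            pthr->duration = ktime_set(0, 0);
            (void)queue_work(perf_wq, &pthr->work);
    }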
1120 struct perf_thread *pthr; in perf_read_stats() local
1130 pthr = &perf->threads[tidx]; in perf_read_stats()
1132 if (pthr->status == -ENODATA) in perf_read_stats()
1135 if (pthr->status) { in perf_read_stats()
1137 "%d: error status %d\n", tidx, pthr->status); in perf_read_stats()
1143 tidx, pthr->copied, ktime_to_us(pthr->duration), in perf_read_stats()
1144 div64_u64(pthr->copied, ktime_to_us(pthr->duration))); in perf_read_stats()
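
perf_read_stats() walks the thread slots, skips those still at the -ENODATA sentinel, reports any error status, and derives MB/s with the same div64_u64(bytes, usecs) division; for instance, 4294967296 bytes copied in 2000000 usecs prints 2147 MBytes/s. A sketch of the walk; note the real function formats into a read buffer for userspace, while this sketch prints with dev_dbg() for brevity, and MAX_THREADS_CNT stands in for the array bound:

    for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
            pthr = &perf->threads[tidx];

            if (pthr->status == -ENODATA)
                    continue;               /* never ran, nothing to report */

            if (pthr->status) {
                    dev_err(&perf->ntb->dev, "%d: error status %d\n",
                            tidx, pthr->status);
                    continue;
            }

            dev_dbg(&perf->ntb->dev,
                    "%d: copied %llu bytes in %llu usecs, %llu MBytes/s\n",
                    tidx, pthr->copied, ktime_to_us(pthr->duration),
                    div64_u64(pthr->copied, ktime_to_us(pthr->duration)));
    }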
1154 struct perf_thread *pthr; in perf_init_threads() local
1162 pthr = &perf->threads[tidx]; in perf_init_threads()
1164 pthr->perf = perf; in perf_init_threads()
1165 pthr->tidx = tidx; in perf_init_threads()
1166 pthr->status = -ENODATA; in perf_init_threads()
1167 init_waitqueue_head(&pthr->dma_wait); in perf_init_threads()
1168 INIT_WORK(&pthr->work, perf_thread_work); in perf_init_threads()
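
perf_init_threads() does the one-time wiring at probe: back-pointer to the shared context, thread index, status sentinel, the waitqueue the DMA callback signals, and the work item bound to perf_thread_work(). Sketch, with the loop bound again assumed to be MAX_THREADS_CNT:

    for (tidx = 0; tidx < MAX_THREADS_CNT; tidx++) {
            pthr = &perf->threads[tidx];

            pthr->perf = perf;                      /* back-pointer for phases */
            pthr->tidx = tidx;
            pthr->status = -ENODATA;
            init_waitqueue_head(&pthr->dma_wait);   /* signalled by callback */
            INIT_WORK(&pthr->work, perf_thread_work);
    }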