1dc5698e8SDave Airlie /*
2dc5698e8SDave Airlie * Copyright (C) 2015 Red Hat, Inc.
3dc5698e8SDave Airlie * All Rights Reserved.
4dc5698e8SDave Airlie *
5dc5698e8SDave Airlie * Permission is hereby granted, free of charge, to any person obtaining
6dc5698e8SDave Airlie * a copy of this software and associated documentation files (the
7dc5698e8SDave Airlie * "Software"), to deal in the Software without restriction, including
8dc5698e8SDave Airlie * without limitation the rights to use, copy, modify, merge, publish,
9dc5698e8SDave Airlie * distribute, sublicense, and/or sell copies of the Software, and to
10dc5698e8SDave Airlie * permit persons to whom the Software is furnished to do so, subject to
11dc5698e8SDave Airlie * the following conditions:
12dc5698e8SDave Airlie *
13dc5698e8SDave Airlie * The above copyright notice and this permission notice (including the
14dc5698e8SDave Airlie * next paragraph) shall be included in all copies or substantial
15dc5698e8SDave Airlie * portions of the Software.
16dc5698e8SDave Airlie *
17dc5698e8SDave Airlie * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18dc5698e8SDave Airlie * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19dc5698e8SDave Airlie * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
20dc5698e8SDave Airlie * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
21dc5698e8SDave Airlie * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22dc5698e8SDave Airlie * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23dc5698e8SDave Airlie * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24dc5698e8SDave Airlie */
25dc5698e8SDave Airlie
2648ad7751SChia-I Wu #include <trace/events/dma_fence.h>
27a3d63977SSam Ravnborg
28dc5698e8SDave Airlie #include "virtgpu_drv.h"
29dc5698e8SDave Airlie
/* Convert an embedded &struct dma_fence back to its containing
 * &struct virtio_gpu_fence (the dma_fence is the 'f' member).
 */
#define to_virtio_gpu_fence(x) \
	container_of(x, struct virtio_gpu_fence, f)
32c91a1e2bSGurchetan Singh
/* &dma_fence_ops.get_driver_name: name of the driver owning this fence. */
static const char *virtio_gpu_get_driver_name(struct dma_fence *fence)
{
	return "virtio_gpu";
}
37dc5698e8SDave Airlie
/* &dma_fence_ops.get_timeline_name: all fences here live on the control queue. */
static const char *virtio_gpu_get_timeline_name(struct dma_fence *fence)
{
	return "controlq";
}
42dc5698e8SDave Airlie
virtio_gpu_fence_signaled(struct dma_fence * f)4341a90202SGurchetan Singh static bool virtio_gpu_fence_signaled(struct dma_fence *f)
44dc5698e8SDave Airlie {
4512afce08SGerd Hoffmann /* leaked fence outside driver before completing
46b9662c3aSGurchetan Singh * initialization with virtio_gpu_fence_emit.
47b9662c3aSGurchetan Singh */
48b9662c3aSGurchetan Singh WARN_ON_ONCE(f->seqno == 0);
49dc5698e8SDave Airlie return false;
50dc5698e8SDave Airlie }
51dc5698e8SDave Airlie
virtio_gpu_fence_value_str(struct dma_fence * f,char * str,int size)5241a90202SGurchetan Singh static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size)
53dc5698e8SDave Airlie {
5436549848SGurchetan Singh snprintf(str, size, "[%llu, %llu]", f->context, f->seqno);
55dc5698e8SDave Airlie }
56dc5698e8SDave Airlie
virtio_gpu_timeline_value_str(struct dma_fence * f,char * str,int size)5741a90202SGurchetan Singh static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str,
5841a90202SGurchetan Singh int size)
59dc5698e8SDave Airlie {
6041a90202SGurchetan Singh struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f);
61dc5698e8SDave Airlie
6265f8453dSGurchetan Singh snprintf(str, size, "%llu",
6365f8453dSGurchetan Singh (u64)atomic64_read(&fence->drv->last_fence_id));
64dc5698e8SDave Airlie }
65dc5698e8SDave Airlie
/* dma_fence_ops for virtio-gpu fences; no .enable_signaling needed since
 * fences are signaled from the host-response path, not on demand.
 */
static const struct dma_fence_ops virtio_gpu_fence_ops = {
	.get_driver_name     = virtio_gpu_get_driver_name,
	.get_timeline_name   = virtio_gpu_get_timeline_name,
	.signaled            = virtio_gpu_fence_signaled,
	.fence_value_str     = virtio_gpu_fence_value_str,
	.timeline_value_str  = virtio_gpu_timeline_value_str,
};
73dc5698e8SDave Airlie
/**
 * virtio_gpu_fence_alloc - allocate a partially-initialized fence
 * @vgdev: virtio-gpu device
 * @base_fence_ctx: dma-fence context base; @ring_idx is added to it to
 *	select the per-ring timeline
 * @ring_idx: ring (timeline) index
 *
 * The returned fence has no seqno yet; it must not be used outside the
 * driver until virtio_gpu_fence_emit() completes its initialization.
 *
 * Returns: the new fence, or NULL on allocation failure.
 */
struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev,
						uint64_t base_fence_ctx,
						uint32_t ring_idx)
{
	uint64_t fence_context = base_fence_ctx + ring_idx;
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	struct virtio_gpu_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	fence->drv = drv;
	fence->ring_idx = ring_idx;
	/* Per-ring fence info is only emitted for non-default contexts. */
	fence->emit_fence_info = base_fence_ctx != drv->context;

	/* This only partially initializes the fence because the seqno is
	 * unknown yet. The fence must not be used outside of the driver
	 * until virtio_gpu_fence_emit is called.
	 */
	dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock,
		       fence_context, 0);

	return fence;
}
1009fdd90c0SRobert Foss
/**
 * virtio_gpu_fence_emit - assign a seqno to @fence and mark @cmd_hdr as fenced
 * @vgdev: virtio-gpu device
 * @cmd_hdr: control-queue command header the fence is attached to
 * @fence: fence from virtio_gpu_fence_alloc(), still without a seqno
 *
 * Completes the initialization started in virtio_gpu_fence_alloc(): assigns
 * the next fence id under the fence-driver lock, takes a list reference, and
 * queues the fence on drv->fences so virtio_gpu_fence_event_process() can
 * signal it when the host completes the command.
 */
void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
			  struct virtio_gpu_ctrl_hdr *cmd_hdr,
			  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
	unsigned long irq_flags;

	spin_lock_irqsave(&drv->lock, irq_flags);
	/* Ids are allocated under the lock so they increase monotonically. */
	fence->fence_id = fence->f.seqno = ++drv->current_fence_id;
	/* Reference held by the drv->fences list; dropped when signaled. */
	dma_fence_get(&fence->f);
	list_add_tail(&fence->node, &drv->fences);
	spin_unlock_irqrestore(&drv->lock, irq_flags);

	trace_dma_fence_emit(&fence->f);

	cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE);
	cmd_hdr->fence_id = cpu_to_le64(fence->fence_id);

	/* Only currently defined fence param. */
	if (fence->emit_fence_info) {
		cmd_hdr->flags |=
			cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX);
		cmd_hdr->ring_idx = (u8)fence->ring_idx;
	}
}
126dc5698e8SDave Airlie
virtio_gpu_fence_event_process(struct virtio_gpu_device * vgdev,u64 fence_id)127dc5698e8SDave Airlie void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev,
128bb53a604SGurchetan Singh u64 fence_id)
129dc5698e8SDave Airlie {
130dc5698e8SDave Airlie struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv;
13136549848SGurchetan Singh struct virtio_gpu_fence *signaled, *curr, *tmp;
132dc5698e8SDave Airlie unsigned long irq_flags;
133dc5698e8SDave Airlie
134dc5698e8SDave Airlie spin_lock_irqsave(&drv->lock, irq_flags);
13565f8453dSGurchetan Singh atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id);
13636549848SGurchetan Singh list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
13736549848SGurchetan Singh if (fence_id != curr->fence_id)
138dc5698e8SDave Airlie continue;
13936549848SGurchetan Singh
14036549848SGurchetan Singh signaled = curr;
14136549848SGurchetan Singh
14236549848SGurchetan Singh /*
14336549848SGurchetan Singh * Signal any fences with a strictly smaller sequence number
14436549848SGurchetan Singh * than the current signaled fence.
14536549848SGurchetan Singh */
14636549848SGurchetan Singh list_for_each_entry_safe(curr, tmp, &drv->fences, node) {
14736549848SGurchetan Singh /* dma-fence contexts must match */
14836549848SGurchetan Singh if (signaled->f.context != curr->f.context)
14936549848SGurchetan Singh continue;
15036549848SGurchetan Singh
15136549848SGurchetan Singh if (!dma_fence_is_later(&signaled->f, &curr->f))
15236549848SGurchetan Singh continue;
15336549848SGurchetan Singh
15436549848SGurchetan Singh dma_fence_signal_locked(&curr->f);
155*cd7f5ca3SGurchetan Singh if (curr->e) {
156*cd7f5ca3SGurchetan Singh drm_send_event(vgdev->ddev, &curr->e->base);
157*cd7f5ca3SGurchetan Singh curr->e = NULL;
158*cd7f5ca3SGurchetan Singh }
159*cd7f5ca3SGurchetan Singh
16036549848SGurchetan Singh list_del(&curr->node);
16136549848SGurchetan Singh dma_fence_put(&curr->f);
16236549848SGurchetan Singh }
16336549848SGurchetan Singh
16436549848SGurchetan Singh dma_fence_signal_locked(&signaled->f);
165*cd7f5ca3SGurchetan Singh if (signaled->e) {
166*cd7f5ca3SGurchetan Singh drm_send_event(vgdev->ddev, &signaled->e->base);
167*cd7f5ca3SGurchetan Singh signaled->e = NULL;
168*cd7f5ca3SGurchetan Singh }
169*cd7f5ca3SGurchetan Singh
17036549848SGurchetan Singh list_del(&signaled->node);
17136549848SGurchetan Singh dma_fence_put(&signaled->f);
17236549848SGurchetan Singh break;
173dc5698e8SDave Airlie }
174dc5698e8SDave Airlie spin_unlock_irqrestore(&drv->lock, irq_flags);
175dc5698e8SDave Airlie }
176