// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-util: misc functions for dma_fence objects
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
#include <linux/sort.h>

/* Internal helper to start new array iteration, don't use directly */
static struct dma_fence *
__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
{
	cursor->array = dma_fence_chain_contained(cursor->chain);
	cursor->index = 0;
	return dma_fence_array_first(cursor->array);
}

/**
 * dma_fence_unwrap_first - return the first fence from fence containers
 * @head: the entrypoint into the containers
 * @cursor: current position inside the containers
 *
 * Unwraps potential dma_fence_chain/dma_fence_array containers and returns the
 * first fence.
 */
struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
					 struct dma_fence_unwrap *cursor)
{
	cursor->chain = dma_fence_get(head);
	return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);

/**
 * dma_fence_unwrap_next - return the next fence from the fence containers
 * @cursor: current position inside the containers
 *
 * Continues unwrapping the dma_fence_chain/dma_fence_array containers and
 * returns the next fence from them.
 */
struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
{
	struct dma_fence *tmp;

	++cursor->index;
	tmp = dma_fence_array_next(cursor->array, cursor->index);
	if (tmp)
		return tmp;

	cursor->chain = dma_fence_chain_walk(cursor->chain);
	return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
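
/*
 * Illustrative usage sketch: count how many fences wrapped inside a container
 * fence are still pending. It uses the dma_fence_unwrap_for_each() macro from
 * <linux/dma-fence-unwrap.h>, which is built on dma_fence_unwrap_first() and
 * dma_fence_unwrap_next() above. The function name example_count_pending() is
 * hypothetical and only serves as an example.
 */
static unsigned int __maybe_unused
example_count_pending(struct dma_fence *head)
{
	struct dma_fence_unwrap cursor;
	struct dma_fence *fence;
	unsigned int pending = 0;

	/* Iterates over every leaf fence, stepping through chain/array wrappers */
	dma_fence_unwrap_for_each(fence, &cursor, head) {
		if (!dma_fence_is_signaled(fence))
			pending++;
	}

	return pending;
}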

static int fence_cmp(const void *_a, const void *_b)
{
	struct dma_fence *a = *(struct dma_fence **)_a;
	struct dma_fence *b = *(struct dma_fence **)_b;

	if (a->context < b->context)
		return -1;
	else if (a->context > b->context)
		return 1;

	if (dma_fence_is_later(b, a))
		return 1;
	else if (dma_fence_is_later(a, b))
		return -1;

	return 0;
}

/* Implementation for the dma_fence_unwrap_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
					   struct dma_fence **fences,
					   struct dma_fence_unwrap *iter)
{
	struct dma_fence_array *result;
	struct dma_fence *tmp, **array;
	ktime_t timestamp;
	int i, j, count;

	count = 0;
	timestamp = ns_to_ktime(0);
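	/*
	 * First pass: count the fences which are still pending and remember
	 * the newest timestamp among those which already signaled.
	 */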
	for (i = 0; i < num_fences; ++i) {
		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
			if (!dma_fence_is_signaled(tmp)) {
				++count;
			} else {
				ktime_t t = dma_fence_timestamp(tmp);

				if (ktime_after(t, timestamp))
					timestamp = t;
			}
		}
	}

	/*
	 * If we couldn't find a pending fence just return a private signaled
	 * fence with the timestamp of the last signaled one.
	 */
	if (count == 0)
		return dma_fence_allocate_private_stub(timestamp);

	array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;

	count = 0;
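	/*
	 * Second pass: collect a reference to every fence which is still
	 * pending; signaled fences only contribute their timestamp.
	 */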
	for (i = 0; i < num_fences; ++i) {
		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
			if (!dma_fence_is_signaled(tmp)) {
				array[count++] = dma_fence_get(tmp);
			} else {
				ktime_t t = dma_fence_timestamp(tmp);

				if (ktime_after(t, timestamp))
					timestamp = t;
			}
		}
	}

	if (count == 0 || count == 1)
		goto return_fastpath;

	sort(array, count, sizeof(*array), fence_cmp, NULL);

	/*
	 * Only keep the most recent fence for each context.
	 */
	j = 0;
	for (i = 1; i < count; i++) {
		if (array[i]->context == array[j]->context)
			dma_fence_put(array[i]);
		else
			array[++j] = array[i];
	}
	count = ++j;

	if (count > 1) {
		result = dma_fence_array_create(count, array,
						dma_fence_context_alloc(1),
						1, false);
		if (!result) {
			for (i = 0; i < count; i++)
				dma_fence_put(array[i]);
			tmp = NULL;
			goto return_tmp;
		}
		return &result->base;
	}

return_fastpath:
	if (count == 0)
		tmp = dma_fence_allocate_private_stub(timestamp);
	else
		tmp = array[0];

return_tmp:
	kfree(array);
	return tmp;
}
EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);
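
/*
 * Illustrative usage sketch: merge two fences into a single fence which only
 * signals once both have signaled. It goes through the dma_fence_unwrap_merge()
 * macro from <linux/dma-fence-unwrap.h>, which wraps __dma_fence_unwrap_merge()
 * above. The function name example_merge_pair() is hypothetical and only serves
 * as an example; NULL is returned on allocation failure.
 */
static __maybe_unused struct dma_fence *
example_merge_pair(struct dma_fence *a, struct dma_fence *b)
{
	/* The caller receives a reference it must eventually dma_fence_put() */
	return dma_fence_unwrap_merge(a, b);
}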