/*
 * Copyright 2011 Christian König.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
#include "drmP.h"
#include "drm.h"
#include "radeon.h"

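/* Allocate a new semaphore bo: back it with an IB suballocation and split it
 * into 8 byte semaphores queued on the bo's free list. */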
static int radeon_semaphore_add_bo(struct radeon_device *rdev)
{
	struct radeon_semaphore_bo *bo;
	unsigned long irq_flags;
	uint64_t gpu_addr;
	uint32_t *cpu_ptr;
	int r, i;

	bo = kmalloc(sizeof(struct radeon_semaphore_bo), GFP_KERNEL);
	if (bo == NULL) {
		return -ENOMEM;
	}
	INIT_LIST_HEAD(&bo->free);
	INIT_LIST_HEAD(&bo->list);
	bo->nused = 0;

	r = radeon_ib_get(rdev, 0, &bo->ib, RADEON_SEMAPHORE_BO_SIZE);
	if (r) {
		dev_err(rdev->dev, "failed to get a bo after 5 retries\n");
		kfree(bo);
		return r;
	}
	gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
	gpu_addr += bo->ib->sa_bo.offset;
	cpu_ptr = rdev->ib_pool.sa_manager.cpu_ptr;
	cpu_ptr += (bo->ib->sa_bo.offset >> 2);
	/* carve the IB up into 8 byte (two dword) semaphores */
	for (i = 0; i < (RADEON_SEMAPHORE_BO_SIZE/8); i++) {
		bo->semaphores[i].gpu_addr = gpu_addr;
		bo->semaphores[i].cpu_ptr = cpu_ptr;
		bo->semaphores[i].bo = bo;
		list_add_tail(&bo->semaphores[i].list, &bo->free);
		gpu_addr += 8;
		cpu_ptr += 2;
	}
	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
	list_add_tail(&bo->list, &rdev->semaphore_drv.bo);
	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
	return 0;
}

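/* Free a semaphore bo: release its suballocation and fence, then drop it
 * from the driver list.  Caller must hold semaphore_drv.lock. */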
static void radeon_semaphore_del_bo_locked(struct radeon_device *rdev,
					   struct radeon_semaphore_bo *bo)
{
	radeon_sa_bo_free(rdev, &bo->ib->sa_bo);
	radeon_fence_unref(&bo->ib->fence);
	list_del(&bo->list);
	kfree(bo);
}

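/* Free every unused bo after the first one, provided the first bo still has
 * free semaphores to hand out.  Caller must hold semaphore_drv.lock. */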
static void radeon_semaphore_shrink_locked(struct radeon_device *rdev)
{
	struct radeon_semaphore_bo *bo, *n;

	if (list_empty(&rdev->semaphore_drv.bo)) {
		return;
	}
	/* only shrink if the first bo has free semaphores */
	bo = list_first_entry(&rdev->semaphore_drv.bo, struct radeon_semaphore_bo, list);
	if (list_empty(&bo->free)) {
		return;
	}
	list_for_each_entry_safe_continue(bo, n, &rdev->semaphore_drv.bo, list) {
		if (bo->nused)
			continue;
		radeon_semaphore_del_bo_locked(rdev, bo);
	}
}

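/* Pick a free semaphore from one of the allocated bos, clearing its two
 * dwords before use.  If none is available, allocate a new bo and retry once. */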
int radeon_semaphore_create(struct radeon_device *rdev,
			    struct radeon_semaphore **semaphore)
{
	struct radeon_semaphore_bo *bo;
	unsigned long irq_flags;
	bool do_retry = true;
	int r;

retry:
	*semaphore = NULL;
	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
	list_for_each_entry(bo, &rdev->semaphore_drv.bo, list) {
		if (list_empty(&bo->free))
			continue;
		*semaphore = list_first_entry(&bo->free, struct radeon_semaphore, list);
		(*semaphore)->cpu_ptr[0] = 0;
		(*semaphore)->cpu_ptr[1] = 0;
		list_del(&(*semaphore)->list);
		bo->nused++;
		break;
	}
	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);

	if (*semaphore == NULL) {
		if (do_retry) {
			do_retry = false;
			r = radeon_semaphore_add_bo(rdev);
			if (r)
				return r;
			goto retry;
		}
		return -ENOMEM;
	}

	return 0;
}

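/* Emit a signal command for the semaphore on the given ring. */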
void radeon_semaphore_emit_signal(struct radeon_device *rdev, int ring,
				  struct radeon_semaphore *semaphore)
{
	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, false);
}

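/* Emit a wait command for the semaphore on the given ring. */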
void radeon_semaphore_emit_wait(struct radeon_device *rdev, int ring,
				struct radeon_semaphore *semaphore)
{
	radeon_semaphore_ring_emit(rdev, ring, &rdev->ring[ring], semaphore, true);
}

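/* Return a semaphore to its bo's free list and try to shrink the pool. */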
void radeon_semaphore_free(struct radeon_device *rdev,
			   struct radeon_semaphore *semaphore)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
	semaphore->bo->nused--;
	list_add_tail(&semaphore->list, &semaphore->bo->free);
	radeon_semaphore_shrink_locked(rdev);
	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
}

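/* Tear down the semaphore pool, freeing all bos even if semaphores are
 * still handed out. */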
void radeon_semaphore_driver_fini(struct radeon_device *rdev)
{
	struct radeon_semaphore_bo *bo, *n;
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->semaphore_drv.lock, irq_flags);
	/* force-free everything */
	list_for_each_entry_safe(bo, n, &rdev->semaphore_drv.bo, list) {
		if (!list_empty(&bo->free)) {
			dev_err(rdev->dev, "semaphore still in use\n");
		}
		radeon_semaphore_del_bo_locked(rdev, bo);
	}
	write_unlock_irqrestore(&rdev->semaphore_drv.lock, irq_flags);
}