/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <drm/gpu_scheduler.h>

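/* Slab cache that all scheduler fences are allocated from. */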
static struct kmem_cache *sched_fence_slab;

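/* Create the slab cache backing scheduler fences at module load time. */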
static int __init drm_sched_fence_slab_init(void)
{
	sched_fence_slab = KMEM_CACHE(drm_sched_fence, SLAB_HWCACHE_ALIGN);
	if (!sched_fence_slab)
		return -ENOMEM;

	return 0;
}

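/*
 * Wait for all pending RCU callbacks freeing fences, then destroy the
 * slab cache on module unload.
 */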
static void __exit drm_sched_fence_slab_fini(void)
{
	rcu_barrier();
	kmem_cache_destroy(sched_fence_slab);
}

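/*
 * Install the hardware fence backing this scheduler fence and, if a
 * deadline was already requested, forward it to the new parent.
 */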
static void drm_sched_fence_set_parent(struct drm_sched_fence *s_fence,
				       struct dma_fence *fence)
{
	/*
	 * smp_store_release() to ensure another thread racing us
	 * in drm_sched_fence_set_deadline_finished() sees the
	 * fence's parent set before test_bit()
	 */
	smp_store_release(&s_fence->parent, dma_fence_get(fence));
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT,
		     &s_fence->finished.flags))
		dma_fence_set_deadline(fence, s_fence->deadline);
}

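/**
 * drm_sched_fence_scheduled - signal that the job was pushed to the hardware
 * @fence: the scheduler fence
 * @parent: the hardware fence backing the job, or NULL/ERR_PTR when the
 *          driver could not provide one
 */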
void drm_sched_fence_scheduled(struct drm_sched_fence *fence,
			       struct dma_fence *parent)
{
	/* Set the parent before signaling the scheduled fence, so that
	 * any waiter expecting the parent to be filled after the job has
	 * been scheduled (which is the case for drivers delegating waits
	 * to some firmware) doesn't have to busy wait for the parent to
	 * show up.
	 */
	if (!IS_ERR_OR_NULL(parent))
		drm_sched_fence_set_parent(fence, parent);

	dma_fence_signal(&fence->scheduled);
}

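/**
 * drm_sched_fence_finished - signal that the job has finished
 * @fence: the scheduler fence
 * @result: the job's completion status, recorded as the fence error if
 *          non-zero
 */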
void drm_sched_fence_finished(struct drm_sched_fence *fence, int result)
{
	if (result)
		dma_fence_set_error(&fence->finished, result);
	dma_fence_signal(&fence->finished);
}

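/*
 * dma_fence_ops name callbacks: the driver name is fixed, the timeline
 * name is that of the scheduler the fence belongs to.
 */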
static const char *drm_sched_fence_get_driver_name(struct dma_fence *fence)
{
	return "drm_sched";
}

static const char *drm_sched_fence_get_timeline_name(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	return (const char *)fence->sched->name;
}

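/**
 * drm_sched_fence_free_rcu - free the fence memory
 * @rcu: RCU callback head embedded in the finished fence
 *
 * Frees the fence memory once the RCU grace period has passed.
 */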
static void drm_sched_fence_free_rcu(struct rcu_head *rcu)
{
	struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	if (!WARN_ON_ONCE(!fence))
		kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_free - free up an uninitialized fence
 *
 * @fence: fence to free
 *
 * Free up the fence memory. Should only be used if drm_sched_fence_init()
 * has not been called yet.
 */
void drm_sched_fence_free(struct drm_sched_fence *fence)
{
	/* This function should not be called if the fence has been initialized. */
	if (!WARN_ON_ONCE(fence->sched))
		kmem_cache_free(sched_fence_slab, fence);
}

/**
 * drm_sched_fence_release_scheduled - callback invoked when the fence can be freed
 *
 * @f: fence
 *
 * This function is called when the reference count becomes zero.
 * It just schedules freeing up the fence via RCU.
 */
static void drm_sched_fence_release_scheduled(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(fence->parent);
	call_rcu(&fence->finished.rcu, drm_sched_fence_free_rcu);
}

/**
 * drm_sched_fence_release_finished - drop extra reference
 *
 * @f: fence
 *
 * Drop the extra reference from the scheduled fence to the base fence.
 */
static void drm_sched_fence_release_finished(struct dma_fence *f)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);

	dma_fence_put(&fence->scheduled);
}

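/*
 * dma_fence_ops.set_deadline callback for the finished fence: remember
 * the earliest requested deadline and forward it to the hardware fence
 * once (or if) the parent is already known.
 */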
static void drm_sched_fence_set_deadline_finished(struct dma_fence *f,
						  ktime_t deadline)
{
	struct drm_sched_fence *fence = to_drm_sched_fence(f);
	struct dma_fence *parent;
	unsigned long flags;

	spin_lock_irqsave(&fence->lock, flags);

	/* If we already have an earlier deadline, keep it: */
	if (test_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags) &&
	    ktime_before(fence->deadline, deadline)) {
		spin_unlock_irqrestore(&fence->lock, flags);
		return;
	}

	fence->deadline = deadline;
	set_bit(DRM_SCHED_FENCE_FLAG_HAS_DEADLINE_BIT, &f->flags);

	spin_unlock_irqrestore(&fence->lock, flags);

	/*
	 * smp_load_acquire() to ensure that if we are racing another
	 * thread calling drm_sched_fence_set_parent(), that we see
	 * the parent set before it calls test_bit(HAS_DEADLINE_BIT)
	 */
	parent = smp_load_acquire(&fence->parent);
	if (parent)
		dma_fence_set_deadline(parent, deadline);
}

static const struct dma_fence_ops drm_sched_fence_ops_scheduled = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_scheduled,
};

static const struct dma_fence_ops drm_sched_fence_ops_finished = {
	.get_driver_name = drm_sched_fence_get_driver_name,
	.get_timeline_name = drm_sched_fence_get_timeline_name,
	.release = drm_sched_fence_release_finished,
	.set_deadline = drm_sched_fence_set_deadline_finished,
};

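/**
 * to_drm_sched_fence - cast a dma_fence back to its containing drm_sched_fence
 * @f: fence to cast
 *
 * Returns the containing drm_sched_fence if @f is one of the two fences
 * embedded in a scheduler fence, NULL otherwise.
 */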
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f)
{
	if (f->ops == &drm_sched_fence_ops_scheduled)
		return container_of(f, struct drm_sched_fence, scheduled);

	if (f->ops == &drm_sched_fence_ops_finished)
		return container_of(f, struct drm_sched_fence, finished);

	return NULL;
}
EXPORT_SYMBOL(to_drm_sched_fence);

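/**
 * drm_sched_fence_alloc - allocate a scheduler fence
 * @entity: entity the fence is created for
 * @owner: opaque owner of the fence, typically the submitting job
 *
 * Allocates the fence from the slab cache. drm_sched_fence_init() must be
 * called on it before the fence is used.
 */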
struct drm_sched_fence *drm_sched_fence_alloc(struct drm_sched_entity *entity,
					      void *owner)
{
	struct drm_sched_fence *fence = NULL;

	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
	if (fence == NULL)
		return NULL;

	fence->owner = owner;
	spin_lock_init(&fence->lock);

	return fence;
}

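/**
 * drm_sched_fence_init - initialize an allocated scheduler fence
 * @fence: fence obtained from drm_sched_fence_alloc()
 * @entity: entity the fence belongs to
 *
 * Initializes the scheduled and finished fences on two consecutive fence
 * contexts of the entity, sharing the next sequence number.
 */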
void drm_sched_fence_init(struct drm_sched_fence *fence,
			  struct drm_sched_entity *entity)
{
	unsigned seq;

	fence->sched = entity->rq->sched;
	seq = atomic_inc_return(&entity->fence_seq);
	dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled,
		       &fence->lock, entity->fence_context, seq);
	dma_fence_init(&fence->finished, &drm_sched_fence_ops_finished,
		       &fence->lock, entity->fence_context + 1, seq);
}

module_init(drm_sched_fence_slab_init);
module_exit(drm_sched_fence_slab_fini);

MODULE_DESCRIPTION("DRM GPU scheduler");
MODULE_LICENSE("GPL and additional rights");