/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

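/*
 * Scheduler timeout callback: a job missed its deadline on this ring, so
 * log the last signaled and emitted fence sequence numbers and kick off
 * GPU recovery.
 */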
static void amdgpu_job_timedout(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
		  job->base.sched->name,
		  atomic_read(&job->ring->fence_drv.last_seq),
		  job->ring->fence_drv.sync_seq);

	amdgpu_device_gpu_recover(job->adev, job, false);
}

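/**
 * amdgpu_job_alloc - allocate a job and its IB array
 * @adev: amdgpu device the job will be submitted to
 * @num_ibs: number of IBs the job will use, must be non-zero
 * @job: resulting job, only valid on success
 * @vm: VM the job will run under, may be NULL
 *
 * Job and IB array are allocated as a single block. The sync objects are
 * created here, and the current vram_lost_counter is sampled so the job can
 * be skipped later if VRAM contents are lost before it runs.
 */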
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm)
{
	size_t size = sizeof(struct amdgpu_job);

	if (num_ibs == 0)
		return -EINVAL;

	size += sizeof(struct amdgpu_ib) * num_ibs;

	*job = kzalloc(size, GFP_KERNEL);
	if (!*job)
		return -ENOMEM;

	(*job)->adev = adev;
	(*job)->vm = vm;
	(*job)->ibs = (void *)&(*job)[1];
	(*job)->num_ibs = num_ibs;

	amdgpu_sync_create(&(*job)->sync);
	amdgpu_sync_create(&(*job)->sched_sync);
	(*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter);

	return 0;
}

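/**
 * amdgpu_job_alloc_with_ib - allocate a job with a single IB
 * @adev: amdgpu device the job will be submitted to
 * @size: size of the IB in bytes
 * @job: resulting job, only valid on success
 *
 * Helper for submissions that don't run under a user VM: allocates a
 * one-IB job and points vm_pd_addr at the GART page table.
 */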
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job)
{
	int r;

	r = amdgpu_job_alloc(adev, 1, job, NULL);
	if (r)
		return r;

	r = amdgpu_ib_get(adev, NULL, size, &(*job)->ibs[0]);
	if (r)
		kfree(*job);
	else
		(*job)->vm_pd_addr = adev->gart.table_addr;

	return r;
}

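/**
 * amdgpu_job_free_resources - free the IBs of a job
 * @job: job to clean up
 *
 * Passes the scheduler's finished fence (or the hardware fence as fallback)
 * to amdgpu_ib_free() so the IB memory is not reused before the job has
 * completed.
 */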
void amdgpu_job_free_resources(struct amdgpu_job *job)
{
	struct dma_fence *f;
	unsigned i;

	/* use sched fence if available */
	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;

	for (i = 0; i < job->num_ibs; ++i)
		amdgpu_ib_free(job->adev, &job->ibs[i], f);
}

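/*
 * Scheduler free callback: drop the priority reference taken at submission
 * and release the job once the scheduler is done with it.
 */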
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);

	amdgpu_ring_priority_put(job->ring, s_job->s_priority);
	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

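/**
 * amdgpu_job_free - free a job the scheduler never took ownership of
 * @job: job to free
 *
 * Counterpart to amdgpu_job_free_cb() for jobs that were not pushed to the
 * scheduler, e.g. when submission fails after allocation.
 */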
void amdgpu_job_free(struct amdgpu_job *job)
{
	amdgpu_job_free_resources(job);

	dma_fence_put(job->fence);
	amdgpu_sync_free(&job->sync);
	amdgpu_sync_free(&job->sched_sync);
	kfree(job);
}

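/**
 * amdgpu_job_submit - push a job to the GPU scheduler
 * @job: job to submit
 * @ring: ring the job will run on
 * @entity: scheduler entity the job is queued to
 * @owner: owner used to identify the submission
 * @f: finished fence of the job, must not be NULL
 *
 * Initializes the scheduler job, takes a priority reference on the ring
 * (dropped again in amdgpu_job_free_cb()) and hands the job over to the
 * scheduler. On success the caller gets the finished fence in @f and must
 * not free the job itself anymore.
 */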
int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
		      struct drm_sched_entity *entity, void *owner,
		      struct dma_fence **f)
{
	int r;

	job->ring = ring;

	if (!f)
		return -EINVAL;

	r = drm_sched_job_init(&job->base, &ring->sched, entity, owner);
	if (r)
		return r;

	job->owner = owner;
	job->fence_ctx = entity->fence_context;
	*f = dma_fence_get(&job->base.s_fence->finished);
	amdgpu_job_free_resources(job);
	amdgpu_ring_priority_get(job->ring, job->base.s_priority);
	drm_sched_entity_push_job(&job->base, entity);

	return 0;
}

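/*
 * Scheduler dependency callback: return the next fence the job must wait on
 * before it can run. An explicit dependency that the scheduler could
 * optimize away is remembered in sched_sync (presumably so a pipeline sync
 * can still be emitted for it at IB submission time). Once all other
 * dependencies are resolved, a VM ID is grabbed, which may add one more
 * fence to wait for.
 */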
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
					       struct drm_sched_entity *s_entity)
{
	struct amdgpu_job *job = to_amdgpu_job(sched_job);
	struct amdgpu_vm *vm = job->vm;
	bool explicit = false;
	int r;
	struct dma_fence *fence = amdgpu_sync_get_fence(&job->sync, &explicit);

	if (fence && explicit) {
		if (drm_sched_dependency_optimized(fence, s_entity)) {
			r = amdgpu_sync_fence(job->adev, &job->sched_sync, fence, false);
			if (r)
				DRM_ERROR("Error adding fence to sync (%d)\n", r);
		}
	}

	while (fence == NULL && vm && !job->vmid) {
		struct amdgpu_ring *ring = job->ring;

		r = amdgpu_vmid_grab(vm, ring, &job->sync,
				     &job->base.s_fence->finished,
				     job);
		if (r)
			DRM_ERROR("Error getting VM ID (%d)\n", r);

		fence = amdgpu_sync_get_fence(&job->sync, NULL);
	}

	return fence;
}

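/*
 * Scheduler run callback: push the job's IBs to the ring. The IBs are
 * skipped with -ECANCELED if VRAM contents were lost since the job was
 * allocated, or if the finished fence already carries an error; otherwise
 * the returned hardware fence signals completion on the ring.
 */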
static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job)
{
	struct dma_fence *fence = NULL, *finished;
	struct amdgpu_device *adev;
	struct amdgpu_job *job;
	int r;

	if (!sched_job) {
		DRM_ERROR("job is null\n");
		return NULL;
	}
	job = to_amdgpu_job(sched_job);
	finished = &job->base.s_fence->finished;
	adev = job->adev;

	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));

	trace_amdgpu_sched_run_job(job);

	/* skip the IBs as well if VRAM was lost since the job was created */
	if (job->vram_lost_counter != atomic_read(&adev->vram_lost_counter))
		dma_fence_set_error(finished, -ECANCELED);

	if (finished->error < 0) {
		DRM_INFO("Skip scheduling IBs!\n");
	} else {
		r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job,
				       &fence);
		if (r)
			DRM_ERROR("Error scheduling IBs (%d)\n", r);
	}
	/* if gpu reset, hw fence will be replaced here */
	dma_fence_put(job->fence);
	job->fence = dma_fence_get(fence);

	amdgpu_job_free_resources(job);
	return fence;
}

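/* GPU scheduler backend callbacks implemented by amdgpu. */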
const struct drm_sched_backend_ops amdgpu_sched_ops = {
	.dependency = amdgpu_job_dependency,
	.run_job = amdgpu_job_run,
	.timedout_job = amdgpu_job_timedout,
	.free_job = amdgpu_job_free_cb
};