/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2018 Intel Corporation
 */

#include <linux/kref.h>

#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"

#include "i915_selftest.h"

#include "igt_flush_test.h"
#include "lib_sw_fence.h"

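/*
 * Test fixture: an i915_active wrapped in a kref so it can outlive the
 * retirement callback, plus a flag recording that retirement actually
 * happened.
 */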
struct live_active {
	struct i915_active base;
	struct kref ref;
	bool retired;
};

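/* kref helpers controlling the fixture's lifetime */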
static void __live_get(struct live_active *active)
{
	kref_get(&active->ref);
}

static void __live_free(struct live_active *active)
{
	i915_active_fini(&active->base);
	kfree(active);
}

static void __live_release(struct kref *ref)
{
	struct live_active *active = container_of(ref, typeof(*active), ref);

	__live_free(active);
}

static void __live_put(struct live_active *active)
{
	kref_put(&active->ref, __live_release);
}

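/*
 * i915_active callbacks: __live_active runs when the first reference is
 * acquired, __live_retire once the last reference has been retired.
 */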
static int __live_active(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	__live_get(active);
	return 0;
}

static void __live_retire(struct i915_active *base)
{
	struct live_active *active = container_of(base, typeof(*active), base);

	active->retired = true;
	__live_put(active);
}

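/* Allocate a fixture; the caller owns the initial kref reference */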
static struct live_active *__live_alloc(struct drm_i915_private *i915)
{
	struct live_active *active;

	active = kzalloc(sizeof(*active), GFP_KERNEL);
	if (!active)
		return NULL;

	kref_init(&active->ref);
	i915_active_init(i915, &active->base, __live_active, __live_retire);

	return active;
}

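/*
 * Queue a request on every engine, each gated behind a single unsignaled
 * fence and tracked by the i915_active. While submission is blocked, the
 * active must neither retire early nor lose count of any request.
 */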
static struct live_active *
__live_active_setup(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_sw_fence *submit;
	struct live_active *active;
	enum intel_engine_id id;
	unsigned int count = 0;
	int err = 0;

	active = __live_alloc(i915);
	if (!active)
		return ERR_PTR(-ENOMEM);

	submit = heap_fence_create(GFP_KERNEL);
	if (!submit) {
		kfree(active);
		return ERR_PTR(-ENOMEM);
	}

	err = i915_active_acquire(&active->base);
	if (err)
		goto out;

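	/*
	 * Every request waits on the unsignaled heap fence, so none can
	 * be submitted (and hence retired) until we commit it below.
	 */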
	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = i915_request_create(engine->kernel_context);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			break;
		}

		err = i915_sw_fence_await_sw_fence_gfp(&rq->submit,
						       submit,
						       GFP_KERNEL);
		if (err >= 0)
			err = i915_active_ref(&active->base, rq->timeline, rq);
		i915_request_add(rq);
		if (err) {
			pr_err("Failed to track active ref!\n");
			break;
		}

		count++;
	}

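	/*
	 * Dropping our acquire must not retire the active while the
	 * requests are still queued, and every request we created must
	 * have been tracked.
	 */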
	i915_active_release(&active->base);
	if (active->retired && count) {
		pr_err("i915_active retired before submission!\n");
		err = -EINVAL;
	}
	if (atomic_read(&active->base.count) != count) {
		pr_err("i915_active not tracking all requests, found %d, expected %d\n",
		       atomic_read(&active->base.count), count);
		err = -EINVAL;
	}

out:
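	/* Open the gate: the requests may now be submitted and retired */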
	i915_sw_fence_commit(submit);
	heap_fence_put(submit);
	if (err) {
		__live_put(active);
		active = ERR_PTR(err);
	}

	return active;
}

static int live_active_wait(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Check that we get a callback when requests retire upon waiting */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	active = __live_active_setup(i915);
	if (IS_ERR(active)) {
		err = PTR_ERR(active);
		goto err;
	}

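	/*
	 * Waiting should flush all tracked requests and fire the retire
	 * callback before returning.
	 */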
	i915_active_wait(&active->base);
	if (!active->retired) {
		pr_err("i915_active not retired after waiting!\n");
		err = -EINVAL;
	}

	__live_put(active);

	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

err:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

static int live_active_retire(void *arg)
{
	struct drm_i915_private *i915 = arg;
	struct live_active *active;
	intel_wakeref_t wakeref;
	int err = 0;

	/* Check that we get a callback when requests are indirectly retired */

	mutex_lock(&i915->drm.struct_mutex);
	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	active = __live_active_setup(i915);
	if (IS_ERR(active)) {
		err = PTR_ERR(active);
		goto err;
	}

	/* waits for & retires all requests */
	if (igt_flush_test(i915, I915_WAIT_LOCKED))
		err = -EIO;

	if (!active->retired) {
		pr_err("i915_active not retired after flushing!\n");
		err = -EINVAL;
	}

	__live_put(active);

err:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return err;
}

int i915_active_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(live_active_wait),
		SUBTEST(live_active_retire),
	};

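	/* A wedged GPU cannot submit requests, so there is nothing to test */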
	if (intel_gt_is_wedged(&i915->gt))
		return 0;

	return i915_subtests(tests, i915);
}