/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *	Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
#include "gt/sysfs_engines.h"

#include "i915_drv.h"
#include "i915_sysfs.h"
#include "intel_pm.h"
#include "intel_sideband.h"
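/*
 * The sysfs files are attached to the device of the DRM minor; its
 * drvdata points back at the minor, from which we recover the i915.
 */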
static inline struct drm_i915_private *kdev_minor_to_i915(struct device *kdev)
{
	struct drm_minor *minor = dev_get_drvdata(kdev);

	return to_i915(minor->dev);
}

#ifdef CONFIG_PM
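/*
 * Sample an RC6 residency counter (reported in microseconds) under a
 * runtime-pm wakeref and convert it to the milliseconds exposed in sysfs.
 */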
static u32 calc_residency(struct drm_i915_private *dev_priv,
			  i915_reg_t reg)
{
	intel_wakeref_t wakeref;
	u64 res = 0;

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
		res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg);

	return DIV_ROUND_CLOSEST_ULL(res, 1000);
}
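/*
 * Report the supported RC6 states as a bitmask:
 * bit 0 - RC6, bit 1 - deep RC6 (RC6p), bit 2 - deepest RC6 (RC6pp).
 */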
static ssize_t rc6_enable_show(struct device *kdev,
			       struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	unsigned int mask;

	mask = 0;
	if (HAS_RC6(dev_priv))
		mask |= BIT(0);
	if (HAS_RC6p(dev_priv))
		mask |= BIT(1);
	if (HAS_RC6pp(dev_priv))
		mask |= BIT(2);

	return sysfs_emit(buf, "%x\n", mask);
}

static ssize_t rc6_residency_ms_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6);

	return sysfs_emit(buf, "%u\n", rc6_residency);
}

static ssize_t rc6p_residency_ms_show(struct device *kdev,
				      struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6p_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6p);

	return sysfs_emit(buf, "%u\n", rc6p_residency);
}

static ssize_t rc6pp_residency_ms_show(struct device *kdev,
				       struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6pp_residency = calc_residency(dev_priv, GEN6_GT_GFX_RC6pp);

	return sysfs_emit(buf, "%u\n", rc6pp_residency);
}

static ssize_t media_rc6_residency_ms_show(struct device *kdev,
					   struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	u32 rc6_residency = calc_residency(dev_priv, VLV_GT_MEDIA_RC6);

	return sysfs_emit(buf, "%u\n", rc6_residency);
}

static DEVICE_ATTR_RO(rc6_enable);
static DEVICE_ATTR_RO(rc6_residency_ms);
static DEVICE_ATTR_RO(rc6p_residency_ms);
static DEVICE_ATTR_RO(rc6pp_residency_ms);
static DEVICE_ATTR_RO(media_rc6_residency_ms);
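/*
 * The groups below are merged into the device's pre-existing "power"
 * group (power_group_name), so the files show up next to the standard
 * runtime-pm attributes, e.g. /sys/class/drm/card0/power/rc6_residency_ms
 * (the card index depends on the system).
 */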
static struct attribute *rc6_attrs[] = {
	&dev_attr_rc6_enable.attr,
	&dev_attr_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6_attr_group = {
	.name = power_group_name,
	.attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
	&dev_attr_rc6p_residency_ms.attr,
	&dev_attr_rc6pp_residency_ms.attr,
	NULL
};

static const struct attribute_group rc6p_attr_group = {
	.name = power_group_name,
	.attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
	&dev_attr_media_rc6_residency_ms.attr,
	NULL
};

static const struct attribute_group media_rc6_attr_group = {
	.name = power_group_name,
	.attrs = media_rc6_attrs
};
#endif
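/*
 * L3 parity (DPF) remapping: reject access unless the platform has L3
 * DPF, and require the offset to be u32-aligned and within the log.
 */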
static int l3_access_valid(struct drm_i915_private *i915, loff_t offset)
{
	if (!HAS_L3_DPF(i915))
		return -EPERM;

	if (!IS_ALIGNED(offset, sizeof(u32)))
		return -EINVAL;

	if (offset >= GEN7_L3LOG_SIZE)
		return -ENXIO;

	return 0;
}
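/*
 * Read back the remap table for the slice selected via attr->private;
 * a slice that has never been remapped reads back as zeroes.
 */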
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
	     struct bin_attribute *attr, char *buf,
	     loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	count = round_down(count, sizeof(u32));
	count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);
	memset(buf, 0, count);

	spin_lock(&i915->gem.contexts.lock);
	if (i915->l3_parity.remap_info[slice])
		memcpy(buf,
		       i915->l3_parity.remap_info[slice] + offset / sizeof(u32),
		       count);
	spin_unlock(&i915->gem.contexts.lock);

	return count;
}
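/*
 * Update the remap table for a slice. A fresh table is allocated outside
 * the lock and either installed, or discarded (via freeme) if the slice
 * already has one. Contexts are only marked here; the actual remapping
 * registers are written on the next switch to each context.
 */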
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
	      struct bin_attribute *attr, char *buf,
	      loff_t offset, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	int slice = (int)(uintptr_t)attr->private;
	u32 *remap_info, *freeme = NULL;
	struct i915_gem_context *ctx;
	int ret;

	ret = l3_access_valid(i915, offset);
	if (ret)
		return ret;

	if (count < sizeof(u32))
		return -EINVAL;

	remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
	if (!remap_info)
		return -ENOMEM;

	spin_lock(&i915->gem.contexts.lock);

	if (i915->l3_parity.remap_info[slice]) {
		freeme = remap_info;
		remap_info = i915->l3_parity.remap_info[slice];
	} else {
		i915->l3_parity.remap_info[slice] = remap_info;
	}

	count = round_down(count, sizeof(u32));
	memcpy(remap_info + offset / sizeof(u32), buf, count);

	/* NB: We defer the remapping until we switch to the context */
	list_for_each_entry(ctx, &i915->gem.contexts.list, link)
		ctx->remap_slice |= BIT(slice);

	spin_unlock(&i915->gem.contexts.lock);
	kfree(freeme);

	/*
	 * TODO: Ideally we really want a GPU reset here to make sure errors
	 * aren't propagated. Since I cannot find a stable way to reset the GPU
	 * at this point it is left as a TODO.
	 */

	return count;
}
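/*
 * One binary attribute per L3 slice; the slice index is passed to the
 * read/write handlers above through ->private.
 */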
static const struct bin_attribute dpf_attrs = {
	.attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)0
};

static const struct bin_attribute dpf_attrs_1 = {
	.attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
	.size = GEN7_L3LOG_SIZE,
	.read = i915_l3_read,
	.write = i915_l3_write,
	.mmap = NULL,
	.private = (void *)1
};
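/*
 * gt_act_freq_mhz reports the frequency the GPU is actually running at,
 * whereas gt_cur_freq_mhz below reports the last requested frequency;
 * the two may differ, e.g. while the GPU is idle or throttled.
 */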
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_rps_read_actual_frequency(rps));
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
				    struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->cur_freq));
}

static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &i915->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->boost_freq));
}

static ssize_t gt_boost_freq_mhz_store(struct device *kdev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	bool boost = false;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	/* Validate against (static) hardware limits */
	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq || val > rps->max_freq)
		return -EINVAL;

	mutex_lock(&rps->lock);
	if (val != rps->boost_freq) {
		rps->boost_freq = val;
		boost = atomic_read(&rps->num_waiters);
	}
	mutex_unlock(&rps->lock);
	if (boost)
		schedule_work(&rps->work);

	return count;
}
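/* RPe: the frequency the hardware considers most energy-efficient. */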
static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
				     struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->max_freq_softlimit));
}

static ssize_t gt_max_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val < rps->min_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	if (val > rps->rp0_freq)
		DRM_DEBUG("User requested overclocking to %d\n",
			  intel_gpu_freq(rps, val));

	rps->max_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new max_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret ?: count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;

	return sysfs_emit(buf, "%d\n", intel_gpu_freq(rps, rps->min_freq_softlimit));
}

static ssize_t gt_min_freq_mhz_store(struct device *kdev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	ssize_t ret;
	u32 val;

	ret = kstrtou32(buf, 0, &val);
	if (ret)
		return ret;

	mutex_lock(&rps->lock);

	val = intel_freq_opcode(rps, val);
	if (val < rps->min_freq ||
	    val > rps->max_freq ||
	    val > rps->max_freq_softlimit) {
		ret = -EINVAL;
		goto unlock;
	}

	rps->min_freq_softlimit = val;

	val = clamp_t(int, rps->cur_freq,
		      rps->min_freq_softlimit,
		      rps->max_freq_softlimit);

	/*
	 * We still need *_set_rps to process the new min_delay and
	 * update the interrupt limits and PMINTRMSK even though
	 * frequency request may be unchanged.
	 */
	intel_rps_set(rps, val);

unlock:
	mutex_unlock(&rps->lock);

	return ret ?: count;
}
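/*
 * The frequency files land directly on the DRM card device, e.g.
 * /sys/class/drm/card0/gt_max_freq_mhz (card index depends on the
 * system). Values are in MHz; writes to the soft limits are validated
 * against the hardware range and the opposing soft limit.
 */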
static DEVICE_ATTR_RO(gt_act_freq_mhz);
static DEVICE_ATTR_RO(gt_cur_freq_mhz);
static DEVICE_ATTR_RW(gt_boost_freq_mhz);
static DEVICE_ATTR_RW(gt_max_freq_mhz);
static DEVICE_ATTR_RW(gt_min_freq_mhz);

static DEVICE_ATTR_RO(vlv_rpe_freq_mhz);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
	struct intel_rps *rps = &dev_priv->gt.rps;
	u32 val;

	if (attr == &dev_attr_gt_RP0_freq_mhz)
		val = intel_gpu_freq(rps, rps->rp0_freq);
	else if (attr == &dev_attr_gt_RP1_freq_mhz)
		val = intel_gpu_freq(rps, rps->rp1_freq);
	else if (attr == &dev_attr_gt_RPn_freq_mhz)
		val = intel_gpu_freq(rps, rps->min_freq);
	else
		BUG();

	return sysfs_emit(buf, "%d\n", val);
}
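/* vlv_attrs is gen6_attrs plus the Valleyview/Cherryview-only RPe file. */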
static const struct attribute * const gen6_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	NULL,
};

static const struct attribute * const vlv_attrs[] = {
	&dev_attr_gt_act_freq_mhz.attr,
	&dev_attr_gt_cur_freq_mhz.attr,
	&dev_attr_gt_boost_freq_mhz.attr,
	&dev_attr_gt_max_freq_mhz.attr,
	&dev_attr_gt_min_freq_mhz.attr,
	&dev_attr_gt_RP0_freq_mhz.attr,
	&dev_attr_gt_RP1_freq_mhz.attr,
	&dev_attr_gt_RPn_freq_mhz.attr,
	&dev_attr_vlv_rpe_freq_mhz.attr,
	NULL,
};

#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
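/*
 * GPU error capture: reading the "error" file streams out the most
 * recent coredump, while writing anything to it discards the saved state.
 */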
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
				struct bin_attribute *attr, char *buf,
				loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *i915 = kdev_minor_to_i915(kdev);
	struct i915_gpu_coredump *gpu;
	ssize_t ret;

	gpu = i915_first_error_state(i915);
	if (IS_ERR(gpu)) {
		ret = PTR_ERR(gpu);
	} else if (gpu) {
		ret = i915_gpu_coredump_copy_to_buffer(gpu, buf, off, count);
		i915_gpu_coredump_put(gpu);
	} else {
		const char *str = "No error state collected\n";
		size_t len = strlen(str);

		ret = min_t(size_t, count, len - off);
		memcpy(buf, str + off, ret);
	}

	return ret;
}

static ssize_t error_state_write(struct file *file, struct kobject *kobj,
				 struct bin_attribute *attr, char *buf,
				 loff_t off, size_t count)
{
	struct device *kdev = kobj_to_dev(kobj);
	struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);

	drm_dbg(&dev_priv->drm, "Resetting error state\n");
	i915_reset_error_state(dev_priv);

	return count;
}

static const struct bin_attribute error_state_attr = {
	.attr.name = "error",
	.attr.mode = S_IRUSR | S_IWUSR,
	.size = 0,
	.read = error_state_read,
	.write = error_state_write,
};

static void i915_setup_error_capture(struct device *kdev)
{
	if (sysfs_create_bin_file(&kdev->kobj, &error_state_attr))
		DRM_ERROR("error_state sysfs setup failed\n");
}

static void i915_teardown_error_capture(struct device *kdev)
{
	sysfs_remove_bin_file(&kdev->kobj, &error_state_attr);
}
#else
static void i915_setup_error_capture(struct device *kdev) {}
static void i915_teardown_error_capture(struct device *kdev) {}
#endif
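/*
 * Register everything: RC6 residency (merged into the power group),
 * L3 parity remapping, the RPS frequency controls, error capture and
 * the per-engine directories.
 */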
void i915_setup_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;
	int ret;

#ifdef CONFIG_PM
	if (HAS_RC6(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"RC6 residency sysfs setup failed\n");
	}
	if (HAS_RC6p(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&rc6p_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"RC6p residency sysfs setup failed\n");
	}
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		ret = sysfs_merge_group(&kdev->kobj,
					&media_rc6_attr_group);
		if (ret)
			drm_err(&dev_priv->drm,
				"Media RC6 residency sysfs setup failed\n");
	}
#endif
	if (HAS_L3_DPF(dev_priv)) {
		ret = device_create_bin_file(kdev, &dpf_attrs);
		if (ret)
			drm_err(&dev_priv->drm,
				"l3 parity sysfs setup failed\n");

		if (NUM_L3_SLICES(dev_priv) > 1) {
			ret = device_create_bin_file(kdev,
						     &dpf_attrs_1);
			if (ret)
				drm_err(&dev_priv->drm,
					"l3 parity slice 1 setup failed\n");
		}
	}

	ret = 0;
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		ret = sysfs_create_files(&kdev->kobj, vlv_attrs);
	else if (GRAPHICS_VER(dev_priv) >= 6)
		ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
	if (ret)
		drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");

	i915_setup_error_capture(kdev);

	intel_engines_add_sysfs(dev_priv);
}
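/* Unwind the files registered in i915_setup_sysfs(). */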
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
{
	struct device *kdev = dev_priv->drm.primary->kdev;

	i915_teardown_error_capture(kdev);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		sysfs_remove_files(&kdev->kobj, vlv_attrs);
	else
		sysfs_remove_files(&kdev->kobj, gen6_attrs);
	device_remove_bin_file(kdev, &dpf_attrs_1);
	device_remove_bin_file(kdev, &dpf_attrs);
#ifdef CONFIG_PM
	sysfs_unmerge_group(&kdev->kobj, &rc6_attr_group);
	sysfs_unmerge_group(&kdev->kobj, &rc6p_attr_group);
#endif
}