Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
  1/*
  2 * SPDX-License-Identifier: MIT
  3 *
  4 * Copyright © 2019 Intel Corporation
  5 */
  6
  7#include "gem/i915_gem_pm.h"
  8#include "gt/intel_gt.h"
  9#include "gt/intel_gt_pm.h"
 10#include "gt/intel_gt_requests.h"
 11
 12#include "i915_drv.h"
 13
#if defined(CONFIG_X86)
#include <asm/smp.h>
#else
/*
 * On x86, wbinvd_on_all_cpus() (from <asm/smp.h>) writes back and
 * invalidates every CPU cache, which the suspend/freeze paths below rely
 * on to make GPU-visible memory coherent. Other architectures have no
 * equivalent helper here, so stub it out with a warning rather than
 * silently skipping the flush.
 */
#define wbinvd_on_all_cpus() \
	pr_warn(DRIVER_NAME ": Missing cache flush in %s\n", __func__)
#endif
 20
/*
 * i915_gem_suspend - first-phase GEM suspend
 * @i915: the device private
 *
 * Quiesce GEM activity ahead of system suspend: drop the userfault
 * wakeref, flush pending work, idle the GT and reap freed objects.
 * The ordering of these steps is deliberate; the late phase is
 * handled by i915_gem_suspend_late().
 */
void i915_gem_suspend(struct drm_i915_private *i915)
{
	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	/* Release the GGTT mmap wakeref immediately (timeout of 0). */
	intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0);
	/* Drain any queued driver work before we start idling the GT. */
	flush_workqueue(i915->wq);

	/*
	 * We have to flush all the executing contexts to main memory so
	 * that they can saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the i915->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	intel_gt_suspend_prepare(&i915->gt);

	/* Free objects whose release was deferred to a worker. */
	i915_gem_drain_freed_objects(i915);
}
 41
 42void i915_gem_suspend_late(struct drm_i915_private *i915)
 43{
 44	struct drm_i915_gem_object *obj;
 45	struct list_head *phases[] = {
 46		&i915->mm.shrink_list,
 47		&i915->mm.purge_list,
 48		NULL
 49	}, **phase;
 50	unsigned long flags;
 51	bool flush = false;
 52
 53	/*
 54	 * Neither the BIOS, ourselves or any other kernel
 55	 * expects the system to be in execlists mode on startup,
 56	 * so we need to reset the GPU back to legacy mode. And the only
 57	 * known way to disable logical contexts is through a GPU reset.
 58	 *
 59	 * So in order to leave the system in a known default configuration,
 60	 * always reset the GPU upon unload and suspend. Afterwards we then
 61	 * clean up the GEM state tracking, flushing off the requests and
 62	 * leaving the system in a known idle state.
 63	 *
 64	 * Note that is of the upmost importance that the GPU is idle and
 65	 * all stray writes are flushed *before* we dismantle the backing
 66	 * storage for the pinned objects.
 67	 *
 68	 * However, since we are uncertain that resetting the GPU on older
 69	 * machines is a good idea, we don't - just in case it leaves the
 70	 * machine in an unusable condition.
 71	 */
 72
 73	intel_gt_suspend_late(&i915->gt);
 74
 75	spin_lock_irqsave(&i915->mm.obj_lock, flags);
 76	for (phase = phases; *phase; phase++) {
 77		list_for_each_entry(obj, *phase, mm.link) {
 78			if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
 79				flush |= (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0;
 80			__start_cpu_write(obj); /* presume auto-hibernate */
 81		}
 82	}
 83	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 84	if (flush)
 85		wbinvd_on_all_cpus();
 86}
 87
/*
 * i915_gem_freeze - prepare GEM for hibernation
 * @i915: the device private
 *
 * Returns: 0 (always succeeds).
 */
int i915_gem_freeze(struct drm_i915_private *i915)
{
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(i915);

	return 0;
}
 97
/*
 * i915_gem_freeze_late - final GEM work before the hibernation image is
 * written
 * @i915: the device private
 *
 * Shrinks all objects to minimise the image, flushes CPU caches, and
 * moves the remaining shrinkable objects into the CPU write domain so
 * their domain tracking matches the post-restore reality.
 *
 * Returns: 0 (always succeeds).
 */
int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze()
	 */

	/* Shrinking may touch hardware, so hold a runtime-pm wakeref. */
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		i915_gem_shrink(NULL, i915, -1UL, NULL, ~0);
	i915_gem_drain_freed_objects(i915);

	/* Flush CPU caches before rewriting the objects' domain tracking. */
	wbinvd_on_all_cpus();
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link)
		__start_cpu_write(obj);

	return 0;
}
128
/*
 * i915_gem_resume - restart GEM after suspend
 * @i915: the device private
 *
 * Counterpart to i915_gem_suspend(); simply brings the GT back up.
 */
void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("%s\n", dev_name(i915->drm.dev));

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	intel_gt_resume(&i915->gt);
}