v6.2
/*
 * Copyright (c) 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/kref.h>

#include "gem/i915_gem_object_types.h"
#include "i915_active_types.h"

struct drm_i915_private;

enum fb_op_origin {
	ORIGIN_CPU = 0,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
	ORIGIN_CURSOR_UPDATE,
};
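
/*
 * Rough meaning of the origins above (an interpretation added here, not part
 * of the upstream header): ORIGIN_CPU is a direct CPU write to the buffer,
 * ORIGIN_CS is GPU rendering submitted through the command streamer,
 * ORIGIN_FLIP a page flip, ORIGIN_DIRTYFB the DIRTYFB ioctl path, and
 * ORIGIN_CURSOR_UPDATE a cursor update.
 */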

struct intel_frontbuffer {
	struct kref ref;
	atomic_t bits;
	struct i915_active write;
	struct drm_i915_gem_object *obj;
	struct rcu_head rcu;
};
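
/*
 * Field summary (an interpretation added here, not part of the upstream
 * header): @ref is the reference count, @bits the INTEL_FRONTBUFFER() slots
 * this buffer currently occupies, @write an i915_active used to track pending
 * GPU writes so that ORIGIN_CS handling can be deferred until rendering
 * completes, @obj the backing GEM object, and @rcu defers freeing the
 * structure until after an RCU grace period.
 */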

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1,	\
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
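
/*
 * Worked example (added for illustration, not part of the upstream header):
 * with 8 bits per pipe, pipe A owns bits 0-7 and pipe B owns bits 8-15, the
 * last bit of each block being the legacy overlay. Assuming the usual i915
 * enum values PIPE_B == 1 and PLANE_SPRITE0 == 1:
 *
 *	INTEL_FRONTBUFFER(PIPE_B, PLANE_SPRITE0) == BIT(1 + 8 * 1) == BIT(9)
 *	INTEL_FRONTBUFFER_OVERLAY(PIPE_B)        == BIT(7 + 8 * 1) == BIT(15)
 *	INTEL_FRONTBUFFER_ALL_MASK(PIPE_B)       == GENMASK(15, 8) == 0xff00
 */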

void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
				    unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
				     unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct drm_i915_private *i915,
			    unsigned frontbuffer_bits);
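
/*
 * Interpretation (not from the upstream header): the three flip helpers above
 * bracket page flips on the given frontbuffer bits. flip_prepare is called
 * when a flip is scheduled, flip_complete once that flip has been latched by
 * the hardware, and intel_frontbuffer_flip covers flips whose completion is
 * not tracked separately.
 */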

void intel_frontbuffer_put(struct intel_frontbuffer *front);

static inline struct intel_frontbuffer *
__intel_frontbuffer_get(const struct drm_i915_gem_object *obj)
{
	struct intel_frontbuffer *front;

	if (likely(!rcu_access_pointer(obj->frontbuffer)))
		return NULL;

	rcu_read_lock();
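	/*
	 * The frontbuffer can be torn down concurrently, so only take a
	 * reference if the refcount has not already dropped to zero, and then
	 * re-check that obj->frontbuffer still points at the same structure;
	 * if it changed underneath us, drop the stale reference and retry.
	 */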
	do {
		front = rcu_dereference(obj->frontbuffer);
		if (!front)
			break;

		if (unlikely(!kref_get_unless_zero(&front->ref)))
			continue;

		if (likely(front == rcu_access_pointer(obj->frontbuffer)))
			break;

		intel_frontbuffer_put(front);
	} while (1);
	rcu_read_unlock();

	return front;
}

struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);

void __intel_fb_invalidate(struct intel_frontbuffer *front,
			   enum fb_op_origin origin,
			   unsigned int frontbuffer_bits);

/**
 * intel_frontbuffer_invalidate - invalidate frontbuffer object
 * @front: frontbuffer to invalidate
 * @origin: which operation caused the invalidation
 *
 * This function gets called every time rendering on the given object starts and
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
 * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
 * until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
 */
static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
						enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	if (!front)
		return false;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return false;

	__intel_fb_invalidate(front, origin, frontbuffer_bits);
	return true;
}
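
/*
 * Illustrative usage sketch (added here; the helper names are real but the
 * exact call site is an assumption): a caller about to write to a GEM object
 * through the CPU would typically look up the frontbuffer, invalidate it, and
 * flush once the writes have landed:
 *
 *	struct intel_frontbuffer *front = __intel_frontbuffer_get(obj);
 *
 *	if (front) {
 *		intel_frontbuffer_invalidate(front, ORIGIN_CPU);
 *		... CPU writes to the object ...
 *		intel_frontbuffer_flush(front, ORIGIN_CPU);
 *		intel_frontbuffer_put(front);
 *	}
 */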

void __intel_fb_flush(struct intel_frontbuffer *front,
		      enum fb_op_origin origin,
		      unsigned int frontbuffer_bits);

/**
 * intel_frontbuffer_flush - flush frontbuffer object
 * @front: frontbuffer to flush
 * @origin: which operation caused the flush
 *
 * This function gets called every time rendering on the given object has
 * completed and frontbuffer caching can be started again.
 */
static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
					   enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	if (!front)
		return;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return;

	__intel_fb_flush(front, origin, frontbuffer_bits);
}

void intel_frontbuffer_track(struct intel_frontbuffer *old,
			     struct intel_frontbuffer *new,
			     unsigned int frontbuffer_bits);

#endif /* __INTEL_FRONTBUFFER_H__ */
v6.9.4
/*
 * Copyright (c) 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __INTEL_FRONTBUFFER_H__
#define __INTEL_FRONTBUFFER_H__

#include <linux/atomic.h>
#include <linux/bits.h>
#include <linux/kref.h>

#include "i915_active_types.h"

struct drm_i915_private;

enum fb_op_origin {
	ORIGIN_CPU = 0,
	ORIGIN_CS,
	ORIGIN_FLIP,
	ORIGIN_DIRTYFB,
	ORIGIN_CURSOR_UPDATE,
};

struct intel_frontbuffer {
	struct kref ref;
	atomic_t bits;
	struct i915_active write;
	struct drm_i915_gem_object *obj;
	struct rcu_head rcu;

	struct work_struct flush_work;
};

/*
 * Frontbuffer tracking bits. Set in obj->frontbuffer_bits while a gem bo is
 * considered to be the frontbuffer for the given plane interface-wise. This
 * doesn't mean that the hw necessarily already scans it out, but that any
 * rendering (by the cpu or gpu) will land in the frontbuffer eventually.
 *
 * We have one bit per pipe and per scanout plane type.
 */
#define INTEL_FRONTBUFFER_BITS_PER_PIPE 8
#define INTEL_FRONTBUFFER(pipe, plane_id) \
	BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_OVERLAY(pipe) \
	BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))
#define INTEL_FRONTBUFFER_ALL_MASK(pipe) \
	GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1,	\
		INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))

void intel_frontbuffer_flip_prepare(struct drm_i915_private *i915,
				    unsigned frontbuffer_bits);
void intel_frontbuffer_flip_complete(struct drm_i915_private *i915,
				     unsigned frontbuffer_bits);
void intel_frontbuffer_flip(struct drm_i915_private *i915,
			    unsigned frontbuffer_bits);

void intel_frontbuffer_put(struct intel_frontbuffer *front);

struct intel_frontbuffer *
intel_frontbuffer_get(struct drm_i915_gem_object *obj);

void __intel_fb_invalidate(struct intel_frontbuffer *front,
			   enum fb_op_origin origin,
			   unsigned int frontbuffer_bits);

/**
 * intel_frontbuffer_invalidate - invalidate frontbuffer object
 * @front: frontbuffer to invalidate
 * @origin: which operation caused the invalidation
 *
 * This function gets called every time rendering on the given object starts and
 * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must
 * be invalidated. For ORIGIN_CS any subsequent invalidation will be delayed
 * until the rendering completes or a flip on this frontbuffer plane is
 * scheduled.
 */
static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
						enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	if (!front)
		return false;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return false;

	__intel_fb_invalidate(front, origin, frontbuffer_bits);
	return true;
}

void __intel_fb_flush(struct intel_frontbuffer *front,
		      enum fb_op_origin origin,
		      unsigned int frontbuffer_bits);

/**
 * intel_frontbuffer_flush - flush frontbuffer object
 * @front: frontbuffer to flush
 * @origin: which operation caused the flush
 *
 * This function gets called every time rendering on the given object has
 * completed and frontbuffer caching can be started again.
 */
static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
					   enum fb_op_origin origin)
{
	unsigned int frontbuffer_bits;

	if (!front)
		return;

	frontbuffer_bits = atomic_read(&front->bits);
	if (!frontbuffer_bits)
		return;

	__intel_fb_flush(front, origin, frontbuffer_bits);
}

void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front);
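
/*
 * intel_frontbuffer_queue_flush() and the flush_work member of struct
 * intel_frontbuffer are the additions in this version relative to the v6.2
 * copy of the header above. Judging by the names alone (the implementation
 * lives in intel_frontbuffer.c), queue_flush presumably schedules the
 * frontbuffer flush from front->flush_work rather than flushing synchronously.
 */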

void intel_frontbuffer_track(struct intel_frontbuffer *old,
			     struct intel_frontbuffer *new,
			     unsigned int frontbuffer_bits);

#endif /* __INTEL_FRONTBUFFER_H__ */