v6.8
// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-util: misc functions for dma_fence objects
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>

/* Internal helper to start a new array iteration, don't use directly */
static struct dma_fence *
__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
{
	cursor->array = dma_fence_chain_contained(cursor->chain);
	cursor->index = 0;
	return dma_fence_array_first(cursor->array);
}

/**
 * dma_fence_unwrap_first - return the first fence from fence containers
 * @head: the entrypoint into the containers
 * @cursor: current position inside the containers
 *
 * Unwraps potential dma_fence_chain/dma_fence_array containers and returns the
 * first fence.
 */
struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
					 struct dma_fence_unwrap *cursor)
{
	cursor->chain = dma_fence_get(head);
	return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);

/**
 * dma_fence_unwrap_next - return the next fence from fence containers
 * @cursor: current position inside the containers
 *
 * Continues unwrapping the dma_fence_chain/dma_fence_array containers and
 * returns the next fence from them.
 */
struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
{
	struct dma_fence *tmp;

	++cursor->index;
	tmp = dma_fence_array_next(cursor->array, cursor->index);
	if (tmp)
		return tmp;

	cursor->chain = dma_fence_chain_walk(cursor->chain);
	return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
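
The two helpers above are normally consumed through the dma_fence_unwrap_for_each() iteration macro from <linux/dma-fence-unwrap.h>, which is also how this file itself walks containers further down. A minimal sketch of a caller, assuming only that macro plus dma_fence_is_signaled(); count_pending_fences() is a hypothetical helper and not part of the kernel:

/* Hypothetical helper: count the not-yet-signaled fences inside a container. */
static unsigned int count_pending_fences(struct dma_fence *head)
{
	struct dma_fence_unwrap cursor;
	struct dma_fence *f;
	unsigned int pending = 0;

	/* Visits every fence in a chain/array container, or just @head itself. */
	dma_fence_unwrap_for_each(f, &cursor, head) {
		if (!dma_fence_is_signaled(f))
			++pending;
	}
	return pending;
}

Running the loop to completion also drops the reference the cursor takes on the container internally, so no explicit cleanup is needed in this sketch.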

/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
					   struct dma_fence **fences,
					   struct dma_fence_unwrap *iter)
{
	struct dma_fence_array *result;
	struct dma_fence *tmp, **array;
	ktime_t timestamp;
	unsigned int i;
	size_t count;

	count = 0;
	timestamp = ns_to_ktime(0);
	for (i = 0; i < num_fences; ++i) {
		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
			if (!dma_fence_is_signaled(tmp)) {
				++count;
			} else {
				ktime_t t = dma_fence_timestamp(tmp);

				if (ktime_after(t, timestamp))
					timestamp = t;
			}
		}
	}

	/*
	 * If we couldn't find a pending fence just return a private signaled
	 * fence with the timestamp of the last signaled one.
	 */
	if (count == 0)
		return dma_fence_allocate_private_stub(timestamp);

	array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;

	/*
	 * This trashes the input fence array and uses it as position for the
	 * following merge loop. This works because the dma_fence_merge()
	 * wrapper macro is creating this temporary array on the stack together
	 * with the iterators.
	 */
	for (i = 0; i < num_fences; ++i)
		fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);

	count = 0;
	do {
		unsigned int sel;

restart:
		tmp = NULL;
		for (i = 0; i < num_fences; ++i) {
			struct dma_fence *next;

			while (fences[i] && dma_fence_is_signaled(fences[i]))
				fences[i] = dma_fence_unwrap_next(&iter[i]);

			next = fences[i];
			if (!next)
				continue;

			/*
			 * We can't guarantee that input fences are ordered by
			 * context, but it is still quite likely when this
			 * function is used multiple times. So attempt to order
			 * the fences by context as we pass over them and merge
			 * fences with the same context.
			 */
			if (!tmp || tmp->context > next->context) {
				tmp = next;
				sel = i;

			} else if (tmp->context < next->context) {
				continue;

			} else if (dma_fence_is_later(tmp, next)) {
				fences[i] = dma_fence_unwrap_next(&iter[i]);
				goto restart;
			} else {
				fences[sel] = dma_fence_unwrap_next(&iter[sel]);
				goto restart;
			}
		}

		if (tmp) {
			array[count++] = dma_fence_get(tmp);
			fences[sel] = dma_fence_unwrap_next(&iter[sel]);
		}
	} while (tmp);

	if (count == 0) {
		tmp = dma_fence_allocate_private_stub(ktime_get());
		goto return_tmp;
	}

	if (count == 1) {
		tmp = array[0];
		goto return_tmp;
	}

	result = dma_fence_array_create(count, array,
					dma_fence_context_alloc(1),
					1, false);
	if (!result) {
		tmp = NULL;
		goto return_tmp;
	}
	return &result->base;

return_tmp:
	kfree(array);
	return tmp;
}
EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);
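
Callers reach this function through the dma_fence_merge() wrapper macro declared in <linux/dma-fence-unwrap.h>, which builds the temporary fence and iterator arrays on the stack. A hedged usage sketch, assuming @stored already holds a valid fence reference; track_fence() is a hypothetical helper, not a kernel API:

/*
 * Hypothetical helper: fold a newly received fence into one already tracked
 * by some object.  dma_fence_merge() returns a new reference (or NULL on
 * allocation failure) and leaves the references of its arguments untouched.
 */
static int track_fence(struct dma_fence **stored, struct dma_fence *fence)
{
	struct dma_fence *merged;

	merged = dma_fence_merge(*stored, fence);
	if (!merged)
		return -ENOMEM;

	dma_fence_put(*stored);	/* drop the old reference */
	*stored = merged;	/* keep the merged result instead */
	return 0;
}
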
v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 * dma-fence-util: misc functions for dma_fence objects
 *
 * Copyright (C) 2022 Advanced Micro Devices, Inc.
 * Authors:
 *	Christian König <christian.koenig@amd.com>
 */

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>
#include <linux/dma-fence-chain.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/slab.h>
#include <linux/sort.h>

/* Internal helper to start a new array iteration, don't use directly */
static struct dma_fence *
__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
{
	cursor->array = dma_fence_chain_contained(cursor->chain);
	cursor->index = 0;
	return dma_fence_array_first(cursor->array);
}

/**
 * dma_fence_unwrap_first - return the first fence from fence containers
 * @head: the entrypoint into the containers
 * @cursor: current position inside the containers
 *
 * Unwraps potential dma_fence_chain/dma_fence_array containers and returns the
 * first fence.
 */
struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
					 struct dma_fence_unwrap *cursor)
{
	cursor->chain = dma_fence_get(head);
	return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);

/**
 * dma_fence_unwrap_next - return the next fence from fence containers
 * @cursor: current position inside the containers
 *
 * Continues unwrapping the dma_fence_chain/dma_fence_array containers and
 * returns the next fence from them.
 */
struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
{
	struct dma_fence *tmp;

	++cursor->index;
	tmp = dma_fence_array_next(cursor->array, cursor->index);
	if (tmp)
		return tmp;

	cursor->chain = dma_fence_chain_walk(cursor->chain);
	return __dma_fence_unwrap_array(cursor);
}
EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);

static int fence_cmp(const void *_a, const void *_b)
{
	struct dma_fence *a = *(struct dma_fence **)_a;
	struct dma_fence *b = *(struct dma_fence **)_b;

	if (a->context < b->context)
		return -1;
	else if (a->context > b->context)
		return 1;

	if (dma_fence_is_later(b, a))
		return 1;
	else if (dma_fence_is_later(a, b))
		return -1;

	return 0;
}
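
The comparator sorts the flattened fence array by context and, within one context, places the most recent fence first, so the deduplication pass in __dma_fence_unwrap_merge() below only has to keep the first entry of every context group. An illustration with made-up context/seqno values:

/*
 * Hypothetical example of sort(array, count, sizeof(*array), fence_cmp, NULL):
 *
 *   before: (ctx 2, seq 5) (ctx 1, seq 3) (ctx 2, seq 7) (ctx 1, seq 9)
 *   after:  (ctx 1, seq 9) (ctx 1, seq 3) (ctx 2, seq 7) (ctx 2, seq 5)
 *
 * Dropping every entry whose context matches its predecessor then keeps
 * exactly the latest fence per context: (ctx 1, seq 9) and (ctx 2, seq 7).
 */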

/* Implementation for the dma_fence_merge() macro, don't use directly */
struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
					   struct dma_fence **fences,
					   struct dma_fence_unwrap *iter)
{
	struct dma_fence_array *result;
	struct dma_fence *tmp, **array;
	ktime_t timestamp;
	int i, j, count;

	count = 0;
	timestamp = ns_to_ktime(0);
	for (i = 0; i < num_fences; ++i) {
		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
			if (!dma_fence_is_signaled(tmp)) {
				++count;
			} else {
				ktime_t t = dma_fence_timestamp(tmp);

				if (ktime_after(t, timestamp))
					timestamp = t;
			}
		}
	}

	/*
	 * If we couldn't find a pending fence just return a private signaled
	 * fence with the timestamp of the last signaled one.
	 */
	if (count == 0)
		return dma_fence_allocate_private_stub(timestamp);

	array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
	if (!array)
		return NULL;

	count = 0;
	for (i = 0; i < num_fences; ++i) {
		dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
			if (!dma_fence_is_signaled(tmp)) {
				array[count++] = dma_fence_get(tmp);
			} else {
				ktime_t t = dma_fence_timestamp(tmp);

				if (ktime_after(t, timestamp))
					timestamp = t;
			}
		}
	}

	if (count == 0 || count == 1)
		goto return_fastpath;

	sort(array, count, sizeof(*array), fence_cmp, NULL);

	/*
	 * Only keep the most recent fence for each context.
	 */
	j = 0;
	for (i = 1; i < count; i++) {
		if (array[i]->context == array[j]->context)
			dma_fence_put(array[i]);
		else
			array[++j] = array[i];
	}
	count = ++j;

	if (count > 1) {
		result = dma_fence_array_create(count, array,
						dma_fence_context_alloc(1),
						1, false);
		if (!result) {
			for (i = 0; i < count; i++)
				dma_fence_put(array[i]);
			tmp = NULL;
			goto return_tmp;
		}
		return &result->base;
	}

return_fastpath:
	if (count == 0)
		tmp = dma_fence_allocate_private_stub(timestamp);
	else
		tmp = array[0];

return_tmp:
	kfree(array);
	return tmp;
}
EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);
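
Since the merge result is a context-sorted array with at most one fence per context (or a single fence or signaled stub on the fast paths), it can be re-checked with the unwrap iterator from the top of this file. A hypothetical debug helper, not part of the kernel:

/*
 * Hypothetical debug helper: the merge result is context-sorted and
 * deduplicated, so no two adjacent unwrapped fences should share a context.
 */
static bool merge_is_flat(struct dma_fence *merged)
{
	struct dma_fence_unwrap cursor;
	struct dma_fence *f;
	u64 last_context = 0;
	bool first = true, flat = true;

	dma_fence_unwrap_for_each(f, &cursor, merged) {
		if (!first && f->context == last_context)
			flat = false;
		last_context = f->context;
		first = false;
	}
	return flat;
}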