/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The contents of this file are private to DMA engine drivers and are not
 * part of the API to be used by DMA engine users.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/bug.h>
#include <linux/dmaengine.h>

/**
 * dma_cookie_init - initialize the cookies for a DMA channel
 * @chan: dma channel to initialize
 */
static inline void dma_cookie_init(struct dma_chan *chan)
{
	chan->cookie = DMA_MIN_COOKIE;
	chan->completed_cookie = DMA_MIN_COOKIE;
}

/**
 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
 * @tx: descriptor needing cookie
 *
 * Assign a unique non-zero per-channel cookie to the descriptor.
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = tx->chan;
	dma_cookie_t cookie;

	cookie = chan->cookie + 1;
	if (cookie < DMA_MIN_COOKIE)
		cookie = DMA_MIN_COOKIE;
	tx->cookie = chan->cookie = cookie;

	return cookie;
}

/**
 * dma_cookie_complete - complete a descriptor
 * @tx: descriptor to complete
 *
 * Mark this descriptor complete by updating the channel's completed
 * cookie marker.  Zero the descriptor's cookie to prevent accidental
 * repeated completions.
 *
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
	tx->chan->completed_cookie = tx->cookie;
	tx->cookie = 0;
}

/**
 * dma_cookie_status - report cookie status
 * @chan: dma channel
 * @cookie: cookie we are interested in
 * @state: dma_tx_state structure to return last/used cookies
 *
 * Report the status of the cookie, filling in the state structure if
 * non-NULL.  No locking is required.
 */
static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	dma_cookie_t used, complete;

	used = chan->cookie;
	complete = chan->completed_cookie;
	barrier();
	if (state) {
		state->last = complete;
		state->used = used;
		state->residue = 0;
	}
	return dma_async_is_complete(cookie, complete, used);
}

static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
	if (state)
		state->residue = residue;
}

struct dmaengine_desc_callback {
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
};

/**
 * dmaengine_desc_get_callback - get the passed in callback function
 * @tx: tx descriptor
 * @cb: temp struct to hold the callback info
 *
 * Fill the passed in cb struct with what's available in the passed in
 * tx descriptor struct.
 * No locking is required.
 */
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
			    struct dmaengine_desc_callback *cb)
{
	cb->callback = tx->callback;
	cb->callback_result = tx->callback_result;
	cb->callback_param = tx->callback_param;
}

/**
 * dmaengine_desc_callback_invoke - call the callback function in cb struct
 * @cb: temp struct that is holding the callback info
 * @result: transaction result
 *
 * Call the callback function provided in the cb struct with the parameter
 * in the cb struct.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
			       const struct dmaengine_result *result)
{
	struct dmaengine_result dummy_result = {
		.result = DMA_TRANS_NOERROR,
		.residue = 0
	};

	if (cb->callback_result) {
		if (!result)
			result = &dummy_result;
		cb->callback_result(cb->callback_param, result);
	} else if (cb->callback) {
		cb->callback(cb->callback_param);
	}
}

/**
 * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
 *					then immediately call the callback.
 * @tx: dma async tx descriptor
 * @result: transaction result
 *
 * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
 * in a single function since no work is necessary in between for the driver.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
				   const struct dmaengine_result *result)
{
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(tx, &cb);
	dmaengine_desc_callback_invoke(&cb, result);
}

/**
 * dmaengine_desc_callback_valid - verify the callback is valid in cb
 * @cb: callback info struct
 *
 * Return a bool that verifies whether the callback in cb is valid or not.
 * No locking is required.
 */
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
	return (cb->callback) ? true : false;
}

#endif
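
To show how the cookie helpers above fit together, here is a minimal sketch of a hypothetical driver, not taken from this header: the foo_dma_chan wrapper, the foo_* callbacks, and the bytes_left field are illustrative assumptions; only the dma_cookie_init(), dma_cookie_assign(), dma_cookie_status(), and dma_set_residue() calls come from the code above.

#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include "dmaengine.h"

/* Hypothetical driver-private channel wrapper (not part of this header). */
struct foo_dma_chan {
	struct dma_chan chan;	/* embedded dmaengine channel */
	spinlock_t lock;	/* protects cookie assignment/completion */
	u32 bytes_left;		/* residue of the currently active descriptor */
};

/* .device_alloc_chan_resources: start the channel with fresh cookie state. */
static int foo_alloc_chan_resources(struct dma_chan *chan)
{
	dma_cookie_init(chan);
	return 0;
}

/* .tx_submit: assign the cookie under the channel lock, then queue the descriptor. */
static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct foo_dma_chan *fc = container_of(tx->chan, struct foo_dma_chan, chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&fc->lock, flags);
	cookie = dma_cookie_assign(tx);
	/* ...add the descriptor to the driver's pending list here... */
	spin_unlock_irqrestore(&fc->lock, flags);

	return cookie;
}

/* .device_tx_status: let dma_cookie_status() fill last/used, then add residue. */
static enum dma_status foo_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct foo_dma_chan *fc = container_of(chan, struct foo_dma_chan, chan);
	enum dma_status status;

	status = dma_cookie_status(chan, cookie, state);
	if (status == DMA_COMPLETE)
		return status;

	dma_set_residue(state, fc->bytes_left);
	return status;
}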
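
The matching completion path might look like the hedged sketch below, e.g. run from the driver's interrupt handler or tasklet. Again, the foo_* names are hypothetical; only dma_cookie_complete() and the dmaengine_desc_*() helpers come from this header.

/* Hypothetical completion handler: mark the descriptor done under the channel
 * lock, snapshot its callback info, then invoke the client callback lock-free. */
static void foo_complete_descriptor(struct foo_dma_chan *fc,
				    struct dma_async_tx_descriptor *tx,
				    enum dmaengine_tx_result res, u32 residue)
{
	struct dmaengine_desc_callback cb;
	struct dmaengine_result result = {
		.result = res,		/* e.g. DMA_TRANS_NOERROR */
		.residue = residue,
	};
	unsigned long flags;

	spin_lock_irqsave(&fc->lock, flags);
	dma_cookie_complete(tx);		/* advances chan->completed_cookie */
	dmaengine_desc_get_callback(tx, &cb);	/* copy callback info out of tx */
	spin_unlock_irqrestore(&fc->lock, flags);

	/* Handles both callback_result() and the legacy callback(); does nothing
	 * if the client set neither. */
	dmaengine_desc_callback_invoke(&cb, &result);
}

When no driver work is needed between fetching and invoking the callback, dmaengine_desc_get_callback_invoke() collapses the last two steps into a single call.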