include/linux/shdma-base.h (v5.14.15)
/* SPDX-License-Identifier: GPL-2.0
 *
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c and headers
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 */

#ifndef SHDMA_BASE_H
#define SHDMA_BASE_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>

/**
 * shdma_pm_state - DMA channel PM state
 * SHDMA_PM_ESTABLISHED:	either idle or during data transfer
 * SHDMA_PM_BUSY:		during the transfer preparation, when we have to
 *				drop the lock temporarily
 * SHDMA_PM_PENDING:		transfers pending
 */
enum shdma_pm_state {
	SHDMA_PM_ESTABLISHED,
	SHDMA_PM_BUSY,
	SHDMA_PM_PENDING,
};

struct device;

/*
 * Drivers using this library are expected to embed struct shdma_dev,
 * struct shdma_chan, struct shdma_desc, and struct shdma_slave
 * in their respective device, channel, descriptor and slave objects.
 */

struct shdma_slave {
	int slave_id;
};

struct shdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	size_t partial;
	dma_cookie_t cookie;
	int chunks;
	int mark;
	bool cyclic;			/* used as cyclic transfer */
};

struct shdma_chan {
	spinlock_t chan_lock;		/* Channel operation lock */
	struct list_head ld_queue;	/* Link descriptors queue */
	struct list_head ld_free;	/* Free link descriptors */
	struct dma_chan dma_chan;	/* DMA channel */
	struct device *dev;		/* Channel device */
	void *desc;			/* buffer for descriptor array */
	int desc_num;			/* desc count */
	size_t max_xfer_len;		/* max transfer length */
	int id;				/* Raw id of this channel */
	int irq;			/* Channel IRQ */
	int slave_id;			/* Client ID for slave DMA */
	int real_slave_id;		/* argument passed to filter function */
	int hw_req;			/* DMA request line for slave DMA - same
					 * as MID/RID, used with DT */
	enum shdma_pm_state pm_state;
};
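
/*
 * Illustrative sketch only, not part of this header: the embedding scheme
 * described in the comment above typically looks like this in a controller
 * driver.  The names my_dmae_chan and to_my_chan are hypothetical; the
 * pattern assumes the usual kernel definition of container_of().
 */
struct my_dmae_chan {
	struct shdma_chan shdma_chan;	/* embedded base channel */
	void __iomem *base;		/* hypothetical per-channel registers */
};

static inline struct my_dmae_chan *to_my_chan(struct shdma_chan *schan)
{
	/* Recover the driver channel from the embedded shdma_chan */
	return container_of(schan, struct my_dmae_chan, shdma_chan);
}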

/**
 * struct shdma_ops - simple DMA driver operations
 * desc_completed:	return true if this is the descriptor that has just
 *			completed (atomic)
 * halt_channel:	stop DMA channel operation (atomic)
 * channel_busy:	return true if the channel is busy (atomic)
 * slave_addr:		return slave DMA address
 * desc_setup:		set up the hardware-specific descriptor portion (atomic)
 * set_slave:		bind channel to a slave
 * setup_xfer:		configure channel hardware for operation (atomic)
 * start_xfer:		start the DMA transfer (atomic)
 * embedded_desc:	return Nth struct shdma_desc pointer from the
 *			descriptor array
 * chan_irq:		process channel IRQ, return true if a transfer has
 *			completed (atomic)
 */
struct shdma_ops {
	bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *);
	void (*halt_channel)(struct shdma_chan *);
	bool (*channel_busy)(struct shdma_chan *);
	dma_addr_t (*slave_addr)(struct shdma_chan *);
	int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
			  dma_addr_t, dma_addr_t, size_t *);
	int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
	void (*setup_xfer)(struct shdma_chan *, int);
	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
	struct shdma_desc *(*embedded_desc)(void *, int);
	bool (*chan_irq)(struct shdma_chan *, int);
	size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
};
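
/*
 * Hedged sketch, not part of this header: a controller driver provides its
 * low-level callbacks through a constant ops table and points the ops member
 * of struct shdma_dev (declared below) at it.  Every my_dmae_* symbol is a
 * hypothetical driver function whose prototype matches the corresponding
 * member of struct shdma_ops above.
 */
static const struct shdma_ops my_dmae_shdma_ops = {
	.desc_completed	= my_dmae_desc_completed,
	.halt_channel	= my_dmae_halt_channel,
	.channel_busy	= my_dmae_channel_busy,
	.slave_addr	= my_dmae_slave_addr,
	.desc_setup	= my_dmae_desc_setup,
	.set_slave	= my_dmae_set_slave,
	.setup_xfer	= my_dmae_setup_xfer,
	.start_xfer	= my_dmae_start_xfer,
	.embedded_desc	= my_dmae_embedded_desc,
	.chan_irq	= my_dmae_chan_irq,
	.get_partial	= my_dmae_get_partial,
};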

struct shdma_dev {
	struct dma_device dma_dev;
	struct shdma_chan **schan;
	const struct shdma_ops *ops;
	size_t desc_size;
};

#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
				i < (d)->dma_dev.chancnt; c = (d)->schan[++i])
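
/*
 * Usage sketch, not part of this header: walk every channel registered with
 * a struct shdma_dev, e.g. to halt them all.  my_dmae_halt_all is a
 * hypothetical helper; the NULL check is defensive in case a slot in the
 * schan array was never populated.
 */
static void my_dmae_halt_all(struct shdma_dev *sdev)
{
	struct shdma_chan *schan;
	int i;

	shdma_for_each_chan(schan, sdev, i)
		if (schan)
			sdev->ops->halt_channel(schan);
}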

int shdma_request_irq(struct shdma_chan *, int,
			   unsigned long, const char *);
bool shdma_reset(struct shdma_dev *sdev);
void shdma_chan_probe(struct shdma_dev *sdev,
			   struct shdma_chan *schan, int id);
void shdma_chan_remove(struct shdma_chan *schan);
int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num);
void shdma_cleanup(struct shdma_dev *sdev);
#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
bool shdma_chan_filter(struct dma_chan *chan, void *arg);
#else
static inline bool shdma_chan_filter(struct dma_chan *chan, void *arg)
{
	return false;
}
#endif
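
/*
 * Client-side usage sketch, not part of this header: request a DMA_SLAVE
 * channel bound to a given request line through the filter above.  The
 * function name is hypothetical and the slave_id would normally come from
 * platform data or the device tree.
 */
static struct dma_chan *my_request_slave_chan(int slave_id)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* the filter treats the opaque argument as the requested slave ID */
	return dma_request_channel(mask, shdma_chan_filter,
				   (void *)(uintptr_t)slave_id);
}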

#endif
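
/*
 * Hedged end-to-end sketch, not part of this header: the probe/remove order
 * a controller driver is expected to follow, inferred from the declarations
 * above.  All my_dmae_* / MY_* names are hypothetical, error unwinding and
 * details such as capability masks and per-channel shdma_request_irq()
 * setup are omitted, and <linux/platform_device.h> and <linux/slab.h> are
 * assumed to be included.
 */
#define MY_NR_CHANNELS	4		/* hypothetical channel count */

struct my_dmae_desc {
	struct shdma_desc shdma_desc;	/* embedded, as required above */
	/* hardware-specific descriptor fields would follow */
};

struct my_dmae_device {
	struct shdma_dev shdma_dev;	/* embedded base device */
	struct my_dmae_chan chan[MY_NR_CHANNELS];
};

static int my_dmae_probe(struct platform_device *pdev)
{
	struct my_dmae_device *mydev;
	int err, i;

	mydev = devm_kzalloc(&pdev->dev, sizeof(*mydev), GFP_KERNEL);
	if (!mydev)
		return -ENOMEM;

	mydev->shdma_dev.ops = &my_dmae_shdma_ops;
	mydev->shdma_dev.desc_size = sizeof(struct my_dmae_desc);

	/* Allocate the library's per-device state, including the schan array */
	err = shdma_init(&pdev->dev, &mydev->shdma_dev, MY_NR_CHANNELS);
	if (err < 0)
		return err;

	/* Hand each embedded channel over to the base library */
	for (i = 0; i < MY_NR_CHANNELS; i++)
		shdma_chan_probe(&mydev->shdma_dev,
				 &mydev->chan[i].shdma_chan, i);

	platform_set_drvdata(pdev, mydev);

	/* Finally expose the controller to the dmaengine core */
	return dma_async_device_register(&mydev->shdma_dev.dma_dev);
}

static int my_dmae_remove(struct platform_device *pdev)
{
	struct my_dmae_device *mydev = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&mydev->shdma_dev.dma_dev);
	for (i = 0; i < MY_NR_CHANNELS; i++)
		shdma_chan_remove(&mydev->chan[i].shdma_chan);
	shdma_cleanup(&mydev->shdma_dev);

	return 0;
}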