/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Device memory TCP support
 *
 * Authors:	Mina Almasry <almasrymina@google.com>
 *		Willem de Bruijn <willemb@google.com>
 *		Kaiyuan Zhang <kaiyuanz@google.com>
 *
 */
#ifndef _NET_DEVMEM_H
#define _NET_DEVMEM_H

struct netlink_ext_ack;

struct net_devmem_dmabuf_binding {
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attachment;
	struct sg_table *sgt;
	struct net_device *dev;
	struct gen_pool *chunk_pool;

	/* The user holds a ref (via the netlink API) for as long as they want
	 * the binding to remain alive. Each page pool using this binding holds
	 * a ref to keep the binding alive. Each allocated net_iov holds a
	 * ref.
	 *
	 * The binding undoes itself and unmaps the underlying dmabuf once all
	 * those refs are dropped and the binding is no longer desired or in
	 * use.
	 */
	refcount_t ref;

	/* The list of bindings currently active. Used by netlink to notify us
	 * when the user drops the binding.
	 */
	struct list_head list;

	/* rxqs this binding is active on. */
	struct xarray bound_rxqs;

	/* ID of this binding. Globally unique across all currently active
	 * bindings.
	 */
	u32 id;
};
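
/* A minimal lifecycle sketch for the refcount described above, using only
 * the entry points declared below under CONFIG_NET_DEVMEM. Illustrative
 * only; the exact call sequence from the netlink handlers is an assumption.
 *
 *	binding = net_devmem_bind_dmabuf(dev, dmabuf_fd, extack);
 *	if (IS_ERR(binding))
 *		return PTR_ERR(binding);
 *	err = net_devmem_bind_dmabuf_to_queue(dev, rxq_idx, binding, extack);
 *	...
 *	// page pools and allocated net_iovs take/drop their own refs
 *	net_devmem_unbind_dmabuf(binding);	// user drops its ref; the
 *						// dmabuf is unmapped once the
 *						// last ref is gone
 */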

#if defined(CONFIG_NET_DEVMEM)
/* Owner of the dma-buf chunks inserted into the genpool. Each scatterlist
 * entry from the dmabuf is inserted into the genpool as a chunk, and needs
 * this owner struct to track the metadata necessary to carve allocations
 * out of this chunk.
 */
struct dmabuf_genpool_chunk_owner {
	/* Offset into the dma-buf where this chunk starts. */
	unsigned long base_virtual;

	/* dma_addr of the start of the chunk. */
	dma_addr_t base_dma_addr;

	/* Array of net_iovs for this chunk. */
	struct net_iov *niovs;
	size_t num_niovs;

	struct net_devmem_dmabuf_binding *binding;
};
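
/* Address-math sketch: net_iovs within a chunk are page sized, so the
 * virtual and DMA addresses of a net_iov are presumably derived the same
 * way from the chunk bases (the virtual half mirrors net_iov_virtual_addr()
 * below; the DMA half is an assumption for illustration):
 *
 *	idx      = niov - owner->niovs;
 *	virt     = owner->base_virtual + ((unsigned long)idx << PAGE_SHIFT);
 *	dma_addr = owner->base_dma_addr + ((dma_addr_t)idx << PAGE_SHIFT);
 */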

void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding);
struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack);
void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding);
int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				    struct net_devmem_dmabuf_binding *binding,
				    struct netlink_ext_ack *extack);
void dev_dmabuf_uninstall(struct net_device *dev);

static inline struct dmabuf_genpool_chunk_owner *
net_iov_owner(const struct net_iov *niov)
{
	return niov->owner;
}

static inline unsigned int net_iov_idx(const struct net_iov *niov)
{
	return niov - net_iov_owner(niov)->niovs;
}

static inline struct net_devmem_dmabuf_binding *
net_iov_binding(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding;
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov);

	return owner->base_virtual +
	       ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT);
}

static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return net_iov_owner(niov)->binding->id;
}
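
/* Worked example for the helpers above (illustrative, assuming
 * PAGE_SHIFT == 12): for niov == &owner->niovs[3] with
 * owner->base_virtual == 0x10000, net_iov_idx() returns 3 and
 * net_iov_virtual_addr() returns 0x10000 + (3 << 12) == 0x13000.
 */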

static inline void
net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding)
{
	refcount_inc(&binding->ref);
}

static inline void
net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding)
{
	if (!refcount_dec_and_test(&binding->ref))
		return;

	__net_devmem_dmabuf_binding_free(binding);
}
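
/* Usage sketch for the get/put pair above (illustrative): a component such
 * as a page pool pins the binding while it serves allocations from
 * binding->chunk_pool.
 *
 *	net_devmem_dmabuf_binding_get(binding);
 *	... allocate and hand out net_iovs ...
 *	net_devmem_dmabuf_binding_put(binding);	// may free the binding and
 *						// unmap the dmabuf
 */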

struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding);
void net_devmem_free_dmabuf(struct net_iov *ppiov);
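
/* Pairing sketch (illustrative): net_devmem_alloc_dmabuf() presumably
 * returns NULL when no chunk space is available (as the !CONFIG_NET_DEVMEM
 * stub below does), so callers check before use and release with
 * net_devmem_free_dmabuf() when done.
 *
 *	struct net_iov *niov = net_devmem_alloc_dmabuf(binding);
 *
 *	if (!niov)
 *		return -ENOMEM;
 *	...
 *	net_devmem_free_dmabuf(niov);
 */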

#else
struct net_devmem_dmabuf_binding;

static inline void
__net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding)
{
}

static inline struct net_devmem_dmabuf_binding *
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd,
		       struct netlink_ext_ack *extack)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
}

static inline int
net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx,
				struct net_devmem_dmabuf_binding *binding,
				struct netlink_ext_ack *extack)
{
	return -EOPNOTSUPP;
}

static inline void dev_dmabuf_uninstall(struct net_device *dev)
{
}

static inline struct net_iov *
net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding)
{
	return NULL;
}

static inline void net_devmem_free_dmabuf(struct net_iov *ppiov)
{
}

static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov)
{
	return 0;
}

static inline u32 net_iov_binding_id(const struct net_iov *niov)
{
	return 0;
}
#endif

#endif /* _NET_DEVMEM_H */