v6.13.7: drivers/crypto/inside-secure/safexcel_ring.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

#include "safexcel.h"

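/*
 * Allocate the command descriptor ring (CDR), its shadow ring holding the
 * per-descriptor token data, and the result descriptor ring (RDR) from
 * DMA-coherent memory, and point every command descriptor at its shadow
 * slot once up front. Note that base_end refers to the last valid
 * descriptor slot, not one past the end of the ring.
 */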
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
				   struct safexcel_desc_ring *cdr,
				   struct safexcel_desc_ring *rdr)
{
	int i;
	struct safexcel_command_desc *cdesc;
	dma_addr_t atok;

	/* Actual command descriptor ring */
	cdr->offset = priv->config.cd_offset;
	cdr->base = dmam_alloc_coherent(priv->dev,
					cdr->offset * EIP197_DEFAULT_RING_SIZE,
					&cdr->base_dma, GFP_KERNEL);
	if (!cdr->base)
		return -ENOMEM;
	cdr->write = cdr->base;
	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	cdr->read = cdr->base;

	/* Command descriptor shadow ring for storing additional token data */
	cdr->shoffset = priv->config.cdsh_offset;
	cdr->shbase = dmam_alloc_coherent(priv->dev,
					  cdr->shoffset *
					  EIP197_DEFAULT_RING_SIZE,
					  &cdr->shbase_dma, GFP_KERNEL);
	if (!cdr->shbase)
		return -ENOMEM;
	cdr->shwrite = cdr->shbase;
	cdr->shbase_end = cdr->shbase + cdr->shoffset *
					(EIP197_DEFAULT_RING_SIZE - 1);

	/*
	 * Populate command descriptors with physical pointers to shadow descs.
	 * Note that we only need to do this once if we don't overwrite them.
	 */
	cdesc = cdr->base;
	atok = cdr->shbase_dma;
	for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
		cdesc->atok_lo = lower_32_bits(atok);
		cdesc->atok_hi = upper_32_bits(atok);
		cdesc = (void *)cdesc + cdr->offset;
		atok += cdr->shoffset;
	}

	rdr->offset = priv->config.rd_offset;
	/* Use shoffset for result token offset here */
	rdr->shoffset = priv->config.res_offset;
	rdr->base = dmam_alloc_coherent(priv->dev,
					rdr->offset * EIP197_DEFAULT_RING_SIZE,
					&rdr->base_dma, GFP_KERNEL);
	if (!rdr->base)
		return -ENOMEM;
	rdr->write = rdr->base;
	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	rdr->read = rdr->base;

	return 0;
}

inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
{
	return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}

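/*
 * Reserve the next command descriptor slot and, for the first segment of a
 * request, hand back the matching slot in the shadow token ring. The ring is
 * treated as full when the write pointer sits one slot behind the read
 * pointer (directly or across the wrap), which keeps one slot unused so a
 * full ring can be told apart from an empty one.
 */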
static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
				     struct safexcel_desc_ring *ring,
				     bool first,
				     struct safexcel_token **atoken)
{
	void *ptr = ring->write;

	if (first)
		*atoken = ring->shwrite;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end) {
		ring->write = ring->base;
		ring->shwrite = ring->shbase;
	} else {
		ring->write += ring->offset;
		ring->shwrite += ring->shoffset;
	}

	return ptr;
}

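/*
 * Result-ring variant of the above: reserve the next result descriptor and
 * return a pointer to the result token that lives at shoffset bytes into the
 * same slot, so the caller can pre-initialise it.
 */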
static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
				     struct safexcel_desc_ring *ring,
				     struct result_data_desc **rtoken)
{
	void *ptr = ring->write;

	/* Result token at relative offset shoffset */
	*rtoken = ring->write + ring->shoffset;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end)
		ring->write = ring->base;
	else
		ring->write += ring->offset;

	return ptr;
}

void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
			      struct safexcel_desc_ring *ring)
{
	void *ptr = ring->read;

	if (ring->write == ring->read)
		return ERR_PTR(-ENOENT);

	if (ring->read == ring->base_end)
		ring->read = ring->base;
	else
		ring->read += ring->offset;

	return ptr;
}

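/*
 * Helpers for result processing: peek at the current read pointer and map a
 * result descriptor (or the current read position) back to its index in the
 * ring by dividing the byte offset from the ring base by the descriptor
 * stride.
 */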
inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
				     int ring)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return rdr->read;
}

inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
					 int ring)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return (rdr->read - rdr->base) / rdr->offset;
}

inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
					 int ring,
					 struct safexcel_result_desc *rdesc)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return ((void *)rdesc - rdr->base) / rdr->offset;
}

void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
				 struct safexcel_desc_ring *ring)
{
	if (ring->write == ring->read)
		return;

	if (ring->write == ring->base) {
		ring->write = ring->base_end;
		ring->shwrite = ring->shbase_end;
	} else {
		ring->write -= ring->offset;
		ring->shwrite -= ring->shoffset;
	}
}

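/*
 * Fill in the next command descriptor for one scatter particle of a request.
 * Only the first segment carries the control data: the total packet length
 * (forced to at least 1, see the comment below), the context pointer and the
 * descriptor options. The token pointer for the request is returned through
 * *atoken so the caller can build the instruction stream in the shadow ring.
 */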
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
						 int ring_id,
						 bool first, bool last,
						 dma_addr_t data, u32 data_len,
						 u32 full_data_len,
						 dma_addr_t context,
						 struct safexcel_token **atoken)
{
	struct safexcel_command_desc *cdesc;

	cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
					 first, atoken);
	if (IS_ERR(cdesc))
		return cdesc;

	cdesc->particle_size = data_len;
	cdesc->rsvd0 = 0;
	cdesc->last_seg = last;
	cdesc->first_seg = first;
	cdesc->additional_cdata_size = 0;
	cdesc->rsvd1 = 0;
	cdesc->data_lo = lower_32_bits(data);
	cdesc->data_hi = upper_32_bits(data);

	if (first) {
		/*
		 * Note that the length here MUST be >0 or else the EIP(1)97
		 * may hang. Newer EIP197 firmware actually incorporates this
		 * fix already, but that doesn't help the EIP97 and we may
		 * also be running older firmware.
		 */
		cdesc->control_data.packet_length = full_data_len ?: 1;
		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
					      EIP197_OPTION_64BIT_CTX |
					      EIP197_OPTION_CTX_CTRL_IN_CMD |
					      EIP197_OPTION_RC_AUTO;
		cdesc->control_data.type = EIP197_TYPE_BCLA;
		cdesc->control_data.context_lo = lower_32_bits(context) |
						 EIP197_CONTEXT_SMALL;
		cdesc->control_data.context_hi = upper_32_bits(context);
	}

	return cdesc;
}

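/*
 * Fill in the next result descriptor. The overflow flags and the error code
 * in the in-line result token are deliberately set to "error" values here;
 * the hardware overwrites them on completion, so a descriptor that was never
 * processed cannot be mistaken for a successful one.
 */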
struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
						int ring_id,
						bool first, bool last,
						dma_addr_t data, u32 len)
{
	struct safexcel_result_desc *rdesc;
	struct result_data_desc *rtoken;

	rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
					 &rtoken);
	if (IS_ERR(rdesc))
		return rdesc;

	rdesc->particle_size = len;
	rdesc->rsvd0 = 0;
	rdesc->descriptor_overflow = 1; /* assume error */
	rdesc->buffer_overflow = 1;     /* assume error */
	rdesc->last_seg = last;
	rdesc->first_seg = first;
	rdesc->result_size = EIP197_RD64_RESULT_SIZE;
	rdesc->rsvd1 = 0;
	rdesc->data_lo = lower_32_bits(data);
	rdesc->data_hi = upper_32_bits(data);

	/* Clear length in result token */
	rtoken->packet_length = 0;
	/* Assume errors - HW will clear if not the case */
	rtoken->error_code = 0x7fff;

	return rdesc;
}
v4.17: drivers/crypto/inside-secure/safexcel_ring.c
 
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

#include "safexcel.h"

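/*
 * Older layout of the ring setup: the config offsets are given in 32-bit
 * words and converted to bytes here (hence the sizeof(u32) scaling), there
 * is no separate shadow token ring yet, and base_end points one slot past
 * the end of the ring rather than at the last slot as in the current code
 * above.
 */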
int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
				   struct safexcel_ring *cdr,
				   struct safexcel_ring *rdr)
{
	cdr->offset = sizeof(u32) * priv->config.cd_offset;
	cdr->base = dmam_alloc_coherent(priv->dev,
					cdr->offset * EIP197_DEFAULT_RING_SIZE,
					&cdr->base_dma, GFP_KERNEL);
	if (!cdr->base)
		return -ENOMEM;
	cdr->write = cdr->base;
	cdr->base_end = cdr->base + cdr->offset * EIP197_DEFAULT_RING_SIZE;
	cdr->read = cdr->base;

	rdr->offset = sizeof(u32) * priv->config.rd_offset;
	rdr->base = dmam_alloc_coherent(priv->dev,
					rdr->offset * EIP197_DEFAULT_RING_SIZE,
					&rdr->base_dma, GFP_KERNEL);
	if (!rdr->base)
		return -ENOMEM;
	rdr->write = rdr->base;
	rdr->base_end = rdr->base + rdr->offset * EIP197_DEFAULT_RING_SIZE;
	rdr->read = rdr->base;

	return 0;
}

inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
{
	return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}

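/*
 * In this version the ring keeps an explicit occupancy counter (ring->nr)
 * instead of deriving the fill level from the distance between the read and
 * write pointers; one descriptor slot is still left unused to mark the ring
 * as full (nr may only grow to EIP197_DEFAULT_RING_SIZE - 1).
 */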
static void *safexcel_ring_next_wptr(struct safexcel_crypto_priv *priv,
				     struct safexcel_ring *ring)
{
	void *ptr = ring->write;

	if (ring->nr == EIP197_DEFAULT_RING_SIZE - 1)
		return ERR_PTR(-ENOMEM);

	ring->write += ring->offset;
	if (ring->write == ring->base_end)
		ring->write = ring->base;

	ring->nr++;
	return ptr;
}

void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
			      struct safexcel_ring *ring)
{
	void *ptr = ring->read;

	if (!ring->nr)
		return ERR_PTR(-ENOENT);

	ring->read += ring->offset;
	if (ring->read == ring->base_end)
		ring->read = ring->base;

	ring->nr--;
	return ptr;
}

void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
				 struct safexcel_ring *ring)
{
	if (!ring->nr)
		return;

	if (ring->write == ring->base)
		ring->write = ring->base_end - ring->offset;
	else
		ring->write -= ring->offset;

	ring->nr--;
}

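/*
 * In this version the token instructions are built directly inside the
 * command descriptor's control data (control_data.token) rather than in a
 * separate shadow ring, so every token slot is initialised to a no-op first.
 */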
struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
						 int ring_id,
						 bool first, bool last,
						 dma_addr_t data, u32 data_len,
						 u32 full_data_len,
						 dma_addr_t context)
{
	struct safexcel_command_desc *cdesc;
	int i;

	cdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].cdr);
	if (IS_ERR(cdesc))
		return cdesc;

	memset(cdesc, 0, sizeof(struct safexcel_command_desc));

	cdesc->first_seg = first;
	cdesc->last_seg = last;
	cdesc->particle_size = data_len;
	cdesc->data_lo = lower_32_bits(data);
	cdesc->data_hi = upper_32_bits(data);

	if (first && context) {
		struct safexcel_token *token =
			(struct safexcel_token *)cdesc->control_data.token;

		cdesc->control_data.packet_length = full_data_len;
		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
					      EIP197_OPTION_64BIT_CTX |
					      EIP197_OPTION_CTX_CTRL_IN_CMD;
		cdesc->control_data.context_lo =
			(lower_32_bits(context) & GENMASK(31, 2)) >> 2;
		cdesc->control_data.context_hi = upper_32_bits(context);

		/* TODO: large xform HMAC with SHA-384/512 uses refresh = 3 */
		cdesc->control_data.refresh = 2;

		for (i = 0; i < EIP197_MAX_TOKENS; i++)
			eip197_noop_token(&token[i]);
	}

	return cdesc;
}

struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
						int ring_id,
						bool first, bool last,
						dma_addr_t data, u32 len)
{
	struct safexcel_result_desc *rdesc;

	rdesc = safexcel_ring_next_wptr(priv, &priv->ring[ring_id].rdr);
	if (IS_ERR(rdesc))
		return rdesc;

	memset(rdesc, 0, sizeof(struct safexcel_result_desc));

	rdesc->first_seg = first;
	rdesc->last_seg = last;
	rdesc->particle_size = len;
	rdesc->data_lo = lower_32_bits(data);
	rdesc->data_hi = upper_32_bits(data);

	return rdesc;
}
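For reference, a caller in the driver typically reserves one command and one result descriptor per scatter segment and unwinds the command ring if the result ring runs out of slots. The sketch below illustrates that pattern against the v6.13.7 API above; it is not code from the driver, and the function name, ring index, DMA addresses, lengths and context pointer are placeholders.

/*
 * Hypothetical caller: ring, src_dma, dst_dma, len and ctxt_dma stand in for
 * whatever the request being queued provides.
 */
static int example_queue_one_segment(struct safexcel_crypto_priv *priv,
				     int ring, dma_addr_t src_dma,
				     dma_addr_t dst_dma, u32 len,
				     dma_addr_t ctxt_dma)
{
	struct safexcel_command_desc *cdesc;
	struct safexcel_result_desc *rdesc;
	struct safexcel_token *atoken;

	/* Single-segment request: it is both the first and the last segment */
	cdesc = safexcel_add_cdesc(priv, ring, true, true, src_dma, len,
				   len, ctxt_dma, &atoken);
	if (IS_ERR(cdesc))
		return PTR_ERR(cdesc);

	rdesc = safexcel_add_rdesc(priv, ring, true, true, dst_dma, len);
	if (IS_ERR(rdesc)) {
		/* Undo the command descriptor reserved above */
		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
		return PTR_ERR(rdesc);
	}

	/* ...build the token stream in atoken and kick the ring here... */
	return 0;
}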