// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2024 Linaro Ltd.
 */

/* From the Linux v6.13.7 sources */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/types.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_cmd.h"
#include "ipa_endpoint.h"
#include "ipa_mem.h"
#include "ipa_reg.h"
#include "ipa_table.h"

/**
 * DOC:  IPA Immediate Commands
 *
 * The AP command TX endpoint is used to issue immediate commands to the IPA.
 * An immediate command is generally used to request the IPA do something
 * other than data transfer to another endpoint.
 *
 * Immediate commands are represented by GSI transactions just like other
 * transfer requests, and use a single GSI TRE.  Each immediate command
 * has a well-defined format, having a payload of a known length.  This
 * allows the transfer element's length field to be used to hold an
 * immediate command's opcode.  The payload for a command resides in AP
 * memory and is described by a single scatterlist entry in its transaction.
 * Commands do not require a transaction completion callback, and are
 * always issued using gsi_trans_commit_wait().
 */
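
/* Illustrative sketch (editorial note, not part of the kernel source):
 * how a caller issues an immediate command.  A transaction is allocated
 * on the AP command TX endpoint, one command is added per TRE, and the
 * whole transaction is committed synchronously.  "ipa", "offset",
 * "size" and "addr" below are hypothetical placeholders.
 *
 *	struct gsi_trans *trans;
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);
 *	if (!trans)
 *		return -EBUSY;
 *
 *	ipa_cmd_hdr_init_local_add(trans, offset, size, addr);
 *
 *	gsi_trans_commit_wait(trans);
 */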

/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
	pipeline_clear_hps		= 0x0,
	pipeline_clear_src_grp		= 0x1,
	pipeline_clear_full		= 0x2,
};

/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
	__le64 hash_rules_addr;
	__le64 flags;
	__le64 nhash_rules_addr;
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK			GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK			GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK			GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK			GENMASK_ULL(55, 40)
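
/* Worked example (editorial note, not in the original source): these
 * GENMASK_ULL() masks describe where each value lives in the 64-bit
 * flags word.  For a hypothetical non-hashed table of 0x40 bytes at
 * IPA-local offset 0x7e8:
 *
 *	u64 val;
 *
 *	val = u64_encode_bits(0x7e8, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
 *	val |= u64_encode_bits(0x40, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
 *
 * u64_encode_bits() shifts each value into its mask's bit range, so
 * bits 55-40 of val hold 0x7e8 and bits 39-28 hold 0x40.
 */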

/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
	__le64 hdr_table_addr;
	__le32 flags;
	__le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK		GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK		GENMASK(27, 12)

/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_register_write {
	__le16 flags;		/* Unused/reserved prior to IPA v4.0 */
	__le16 offset;
	__le32 value;
	__le32 value_mask;
	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)
/* The next field is not present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)

/* The next field and its values are not present for IPA v4.0+ */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)

/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
	u8 dest_endpoint;	/* Full 8 bits used for IPA v5.0+ */
	u8 reserved[7];
};

/* Field mask for ipa_cmd_ip_packet_init dest_endpoint field (unused v5.0+) */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)

/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_hw_dma_mem_mem {
	__le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
	__le16 size;
	__le16 local_addr;
	__le16 flags;
	__le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)
/* The next two fields are not present for IPA v4.0+ */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)

/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
	__le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)

/* Immediate command payload */
union ipa_cmd_payload {
	struct ipa_cmd_hw_ip_fltrt_init table_init;
	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
	struct ipa_cmd_register_write register_write;
	struct ipa_cmd_ip_packet_init ip_packet_init;
	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};

static void ipa_cmd_validate_build(void)
{
	/* The size of a filter table needs to fit into fields in the
	 * ipa_cmd_hw_ip_fltrt_init structure.  Although hashed tables
	 * might not be used, non-hashed and hashed tables have the same
	 * maximum size.  IPv4 and IPv6 filter tables have the same number
	 * of entries.
	 */
	/* Hashed and non-hashed fields are assumed to be the same size */
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));

	/* Prior to IPA v5.0, we supported no more than 32 endpoints,
	 * and this was reflected in some 5-bit fields that held
	 * endpoint numbers.  Starting with IPA v5.0, the widths of
	 * these fields were extended to 8 bits, meaning up to 256
	 * endpoints.  If the driver claims to support more than
	 * that it's an error.
	 */
	BUILD_BUG_ON(IPA_ENDPOINT_MAX - 1 > U8_MAX);
}

/* Validate a memory region holding a table */
bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
			      bool route)
{
	u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
	const char *table = route ? "route" : "filter";
	struct device *dev = ipa->dev;
	u32 size;

	size = route ? ipa->route_count : ipa->filter_count + 1;
	size *= sizeof(__le64);

	/* Size must fit in the immediate command field that holds it */
	if (size > size_max) {
		dev_err(dev, "%s table region size too large\n", table);
		dev_err(dev, "    (0x%04x > 0x%04x)\n", size, size_max);

		return false;
	}

	/* Offset must fit in the immediate command field that holds it */
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "%s table region offset too large\n", table);
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);

		return false;
	}

	return true;
}

/* Validate the memory region that holds headers */
static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
	struct device *dev = ipa->dev;
	const struct ipa_mem *mem;
	u32 offset_max;
	u32 size_max;
	u32 offset;
	u32 size;

	/* In ipa_cmd_hdr_init_local_add() we record the offset and size of
	 * the header table memory area in an immediate command.  Make sure
	 * the offset and size fit in the fields that need to hold them, and
	 * that the entire range is within the overall IPA memory range.
	 */
	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);

	/* The header memory area contains both the modem and AP header
	 * regions.  The modem portion defines the address of the region.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;

	/* Make sure the offset fits in the IPA command */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "header table region offset too large\n");
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, offset, offset_max);

		return false;
	}

	/* Add the size of the AP portion (if defined) to the combined size */
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	/* Make sure the combined size fits in the IPA command */
	if (size > size_max) {
		dev_err(dev, "header table region size too large\n");
		dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);

		return false;
	}

	return true;
}

/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
						const char *name, u32 offset)
{
	struct ipa_cmd_register_write *payload;
	struct device *dev = ipa->dev;
	u32 offset_max;
	u32 bit_count;

	/* The maximum offset in a register_write immediate command depends
	 * on the version of IPA.  A 16 bit offset is always supported,
	 * but starting with IPA v4.0 some additional high-order bits are
	 * allowed.
	 */
	bit_count = BITS_PER_BYTE * sizeof(payload->offset);
	if (ipa->version >= IPA_VERSION_4_0)
		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
	BUILD_BUG_ON(bit_count > 32);
	offset_max = ~0U >> (32 - bit_count);

	/* Make sure the offset can be represented by the field(s)
	 * that holds it.  Also make sure the offset is not outside
	 * the overall IPA memory range.
	 */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
			name, ipa->mem_offset, offset, offset_max);
		return false;
	}

	return true;
}
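
/* Worked example (editorial note): with a 16-bit payload offset field,
 * bit_count above is 16 and offset_max is 0xffff.  For IPA v4.0+ the
 * four OFFSET_HIGH flag bits raise bit_count to 20, so offset_max
 * becomes 0xfffff and offsets of up to 1 MiB - 1 are representable.
 */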

/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
	const struct reg *reg;
	const char *name;
	u32 offset;

	/* If hashed tables are supported, ensure the hash flush register
	 * offset will fit in a register write IPA immediate command.
	 */
	if (ipa_table_hash_support(ipa)) {
		if (ipa->version < IPA_VERSION_5_0)
			reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
		else
			reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH);

		offset = reg_offset(reg);
		name = "filter/route hash flush";
		if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
			return false;
	}

	/* Each endpoint can have a status endpoint associated with it,
	 * and this is recorded in an endpoint register.  If the modem
	 * crashes, we reset the status endpoint for all modem endpoints
	 * using a register write IPA immediate command.  Make sure the
	 * worst case (highest endpoint number) offset of that endpoint
	 * fits in the register write command field(s) that must hold it.
	 */
	reg = ipa_reg(ipa, ENDP_STATUS);
	offset = reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
	name = "maximal endpoint status";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	return true;
}

int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	/* Command payloads are allocated one at a time, but a single
	 * transaction can require up to the maximum supported by the
	 * channel; treat them as if they were allocated all at once.
	 */
	return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				       sizeof(union ipa_cmd_payload),
				       tre_max, channel->trans_tre_max);
}

void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}

static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
	struct gsi_trans_info *trans_info;
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

	return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}

/* If hash_size is 0, hash_offset and hash_addr are ignored. */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
			    dma_addr_t hash_addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_hw_ip_fltrt_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u64 val;

	/* Record the non-hash table offset and size */
	offset += ipa->mem_offset;
	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

	/* The hash table offset and address are left zero if hash_size is 0 */
	if (hash_size) {
		/* Record the hash table offset and size */
		hash_offset += ipa->mem_offset;
		val |= u64_encode_bits(hash_offset,
				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
		val |= u64_encode_bits(hash_size,
				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->table_init;

	/* Fill in all offsets and sizes and the non-hash table address */
	if (hash_size)
		payload->hash_rules_addr = cpu_to_le64(hash_addr);
	payload->flags = cpu_to_le64(val);
	payload->nhash_rules_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
	struct ipa_cmd_hw_hdr_init_local *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u32 flags;

	offset += ipa->mem_offset;

	/* With this command we tell the IPA where in its local memory the
	 * header tables reside.  The content of the buffer provided is
	 * also written via DMA into that space.  The IPA hardware owns
	 * the table, but the AP must initialize it.
	 */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->hdr_init_local;

	payload->hdr_table_addr = cpu_to_le64(addr);
	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	payload->flags = cpu_to_le32(flags);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
				u32 mask, bool clear_full)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_register_write *payload;
	union ipa_cmd_payload *cmd_payload;
	u32 opcode = IPA_CMD_REGISTER_WRITE;
	dma_addr_t payload_addr;
	u32 clear_option;
	u32 options;
	u16 flags;

	/* pipeline_clear_src_grp is not used */
	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

	/* IPA v4.0+ represents the pipeline clear options in the opcode.  It
	 * also supports a larger offset by encoding additional high-order
	 * bits in the payload flags field.
	 */
	if (ipa->version >= IPA_VERSION_4_0) {
		u16 offset_high;
		u32 val;

		/* Opcode encodes pipeline clear options */
		/* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
		val = u16_encode_bits(clear_option,
				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
		opcode |= val;

		/* Extract the high 4 bits from the offset */
		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
		offset &= (1 << 16) - 1;

		/* Encode those high-order offset bits into the flags field */
		flags = u16_encode_bits(offset_high,
				REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
		options = 0;	/* reserved */

	} else {
		flags = 0;	/* SKIP_CLEAR flag is always 0 */
		options = u16_encode_bits(clear_option,
					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->register_write;

	payload->flags = cpu_to_le16(flags);
	payload->offset = cpu_to_le16((u16)offset);
	payload->value = cpu_to_le32(value);
	payload->value_mask = cpu_to_le32(mask);
	payload->clear_options = cpu_to_le32(options);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}
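
/* Worked example (editorial note): for IPA v4.0+, a hypothetical
 * register offset of 0x12345 is split above before being written to
 * the payload:
 *
 *	offset_high = u32_get_bits(0x12345, GENMASK(19, 16));
 *	offset = 0x12345 & ((1 << 16) - 1);
 *
 * offset_high is then 0x1 and offset is 0x2345; the low 16 bits land
 * in payload->offset and the high four bits in the OFFSET_HIGH field
 * of payload->flags.
 */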

/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
	struct ipa_cmd_ip_packet_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_init;

	if (ipa->version < IPA_VERSION_5_0) {
		payload->dest_endpoint =
			u8_encode_bits(endpoint_id,
				       IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
	} else {
		payload->dest_endpoint = endpoint_id;
	}

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
	struct ipa_cmd_hw_dma_mem_mem *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u16 flags;

	/* size and offset must fit in 16 bit fields */
	WARN_ON(!size);
	WARN_ON(size > U16_MAX);
	WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);

	offset += ipa->mem_offset;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_shared_mem;

	/* payload->clear_after_read was reserved prior to IPA v4.0.  It's
	 * never needed for current code, so it's 0 regardless of version.
	 */
	payload->size = cpu_to_le16(size);
	payload->local_addr = cpu_to_le16(offset);
	/* payload->flags:
	 *   direction:		0 = write to IPA, 1 = read from IPA
	 * Starting at v4.0 these are reserved; either way, all zero:
	 *   pipeline clear:	0 = wait for pipeline clear (don't skip)
	 *   clear_options:	0 = pipeline_clear_hps
	 * Instead, for v4.0+ these are encoded in the opcode.  But again
	 * since both values are 0 we won't bother OR'ing them in.
	 */
	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
	payload->flags = cpu_to_le16(flags);
	payload->system_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}
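
/* Illustrative sketch (editorial note, not part of the kernel source):
 * copying a block of IPA-local memory into a DRAM buffer that is
 * already mapped for DMA.  Passing toward_ipa as false sets the
 * direction flag, so the IPA reads its local memory and writes to the
 * system address.  "trans", "mem" and "addr" are hypothetical
 * placeholders.
 *
 *	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size,
 *				   addr, false);
 *	gsi_trans_commit_wait(trans);
 */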

static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
	struct ipa_cmd_ip_packet_tag_status *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_tag_status;

	payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	union ipa_cmd_payload *payload;
	dma_addr_t payload_addr;

	/* Just transfer a zero-filled payload structure */
	payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Add immediate commands to a transaction to clear the hardware pipeline */
void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_endpoint *endpoint;

	/* This will complete when the transfer is received */
	reinit_completion(&ipa->completion);

	/* Issue a no-op register write command (mask 0 means no write) */
	ipa_cmd_register_write_add(trans, 0, 0, 0, true);

	/* Send a data packet through the IPA pipeline.  The packet_init
	 * command says to send the next packet directly to the exception
	 * endpoint without any other IPA processing.  The tag_status
	 * command requests that status be generated on completion of
	 * that transfer, and that the status be tagged with a value.
	 * Finally, the transfer command sends a small packet of data
	 * (instead of a command) using the command endpoint.
	 */
	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
	ipa_cmd_ip_tag_status_add(trans);
	ipa_cmd_transfer_add(trans);
}

/* Returns the number of commands required to clear the pipeline: a
 * register write, a packet init, a tag status, and a small transfer.
 */
u32 ipa_cmd_pipeline_clear_count(void)
{
	return 4;
}

void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
{
	wait_for_completion(&ipa->completion);
}
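
/* Illustrative sketch (editorial note, not part of the kernel source):
 * a full pipeline clear.  The transaction is sized with
 * ipa_cmd_pipeline_clear_count(), the four commands are added, and once
 * the transaction completes the caller waits for the tagged packet to
 * arrive on the AP LAN RX endpoint.  "ipa" is a hypothetical
 * placeholder.
 *
 *	struct gsi_trans *trans;
 *
 *	trans = ipa_cmd_trans_alloc(ipa, ipa_cmd_pipeline_clear_count());
 *	if (!trans)
 *		return -EBUSY;
 *
 *	ipa_cmd_pipeline_clear_add(trans);
 *	gsi_trans_commit_wait(trans);
 *	ipa_cmd_pipeline_clear_wait(ipa);
 */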

/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
	struct ipa_endpoint *endpoint;

	if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
		return NULL;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

	return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
				       tre_count, DMA_NONE);
}

/* Init function for immediate commands; there is no ipa_cmd_exit() */
int ipa_cmd_init(struct ipa *ipa)
{
	ipa_cmd_validate_build();

	if (!ipa_cmd_header_init_local_valid(ipa))
		return -EINVAL;

	if (!ipa_cmd_register_write_valid(ipa))
		return -EINVAL;

	return 0;
}