// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2022 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"

/**
 * DOC: IPA Immediate Commands
 *
 * The AP command TX endpoint is used to issue immediate commands to the IPA.
 * An immediate command is generally used to request the IPA do something
 * other than data transfer to another endpoint.
 *
 * Immediate commands are represented by GSI transactions just like other
 * transfer requests, and use a single GSI TRE. Each immediate command
 * has a well-defined format, having a payload of a known length. This
 * allows the transfer element's length field to be used to hold an
 * immediate command's opcode. The payload for a command resides in AP
 * memory and is described by a single scatterlist entry in its transaction.
 * Commands do not require a transaction completion callback, and are
 * always issued using gsi_trans_commit_wait().
 */
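
/* For illustration only (an editorial sketch, not from the original
 * source): a caller typically allocates a command transaction, adds one
 * or more immediate commands to it, then commits it and waits:
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);
 *	if (trans) {
 *		ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, true);
 *		gsi_trans_commit_wait(trans);
 *	}
 */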

/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
	pipeline_clear_hps		= 0x0,
	pipeline_clear_src_grp		= 0x1,
	pipeline_clear_full		= 0x2,
};
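
/* Editorial note (an inference from the names, not from the original
 * source): "hps" is understood to refer to the IPA header processing
 * stage, so these options wait for progressively more of the pipeline
 * to drain, from the HPS alone up to the full pipeline.
 */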

/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
	__le64 hash_rules_addr;
	__le64 flags;
	__le64 nhash_rules_addr;
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK			GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK			GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK			GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK			GENMASK_ULL(55, 40)
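
/* Summarizing the masks above, the 64-bit flags field is laid out as
 * follows (a summary derived from the mask definitions; not part of
 * the original source):
 *   bits  0-11  hashed table size
 *   bits 12-27  hashed table offset
 *   bits 28-39  non-hashed table size
 *   bits 40-55  non-hashed table offset
 *   bits 56-63  unused
 */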

/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
	__le64 hdr_table_addr;
	__le32 flags;
	__le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK		GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK		GENMASK(27, 12)

/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_register_write {
	__le16 flags;		/* Unused/reserved prior to IPA v4.0 */
	__le16 offset;
	__le32 value;
	__le32 value_mask;
	__le32 clear_options;	/* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK		GENMASK(14, 11)
/* The next field is not present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK		GENMASK(15, 15)

/* The next field and its values are not present for IPA v4.0+ */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK		GENMASK(1, 0)

/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
	u8 dest_endpoint;
	u8 reserved[7];
};

/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK		GENMASK(4, 0)

/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK		GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK	GENMASK(10, 9)

struct ipa_cmd_hw_dma_mem_mem {
	__le16 clear_after_read;	/* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
	__le16 size;
	__le16 local_addr;
	__le16 flags;
	__le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ			GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK		GENMASK(0, 0)
/* The next two fields are not present for IPA v4.0+ */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK		GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK	GENMASK(3, 2)

/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
	__le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK			GENMASK_ULL(63, 16)

/* Immediate command payload */
union ipa_cmd_payload {
	struct ipa_cmd_hw_ip_fltrt_init table_init;
	struct ipa_cmd_hw_hdr_init_local hdr_init_local;
	struct ipa_cmd_register_write register_write;
	struct ipa_cmd_ip_packet_init ip_packet_init;
	struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
	struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};
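
/* Because every immediate command payload is a member of this union,
 * the DMA pool set up by ipa_cmd_pool_init() below can hand out
 * fixed-size entries large enough for any command. (This comment is
 * an editorial addition, not part of the original source.)
 */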

static void ipa_cmd_validate_build(void)
{
	/* The size of a filter table needs to fit into fields in the
	 * ipa_cmd_hw_ip_fltrt_init structure. Although hashed tables
	 * might not be used, non-hashed and hashed tables have the same
	 * maximum size. IPv4 and IPv6 filter tables have the same number
	 * of entries.
	 */
	/* Hashed and non-hashed fields are assumed to be the same size */
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
	BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
		     field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));

	/* Valid endpoint numbers must fit in the IP packet init command */
	BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
		     IPA_ENDPOINT_MAX - 1);
}

/* Validate a memory region holding a table */
bool ipa_cmd_table_init_valid(struct ipa *ipa, const struct ipa_mem *mem,
			      bool route)
{
	u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
	const char *table = route ? "route" : "filter";
	struct device *dev = &ipa->pdev->dev;
	u32 size;

	size = route ? ipa->route_count : ipa->filter_count + 1;
	size *= sizeof(__le64);

	/* Size must fit in the immediate command field that holds it */
	if (size > size_max) {
		dev_err(dev, "%s table region size too large\n", table);
		dev_err(dev, "    (0x%04x > 0x%04x)\n", size, size_max);

		return false;
	}

	/* Offset must fit in the immediate command field that holds it */
	if (mem->offset > offset_max ||
	    ipa->mem_offset > offset_max - mem->offset) {
		dev_err(dev, "%s table region offset too large\n", table);
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, mem->offset, offset_max);

		return false;
	}

	return true;
}

/* Validate the memory region that holds headers */
static bool ipa_cmd_header_init_local_valid(struct ipa *ipa)
{
	struct device *dev = &ipa->pdev->dev;
	const struct ipa_mem *mem;
	u32 offset_max;
	u32 size_max;
	u32 offset;
	u32 size;

	/* In ipa_cmd_hdr_init_local_add() we record the offset and size of
	 * the header table memory area in an immediate command. Make sure
	 * the offset and size fit in the fields that need to hold them, and
	 * that the entire range is within the overall IPA memory range.
	 */
	offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);

	/* The header memory area contains both the modem and AP header
	 * regions. The modem portion defines the address of the region.
	 */
	mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
	offset = mem->offset;
	size = mem->size;

	/* Make sure the offset fits in the IPA command */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "header table region offset too large\n");
		dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
			ipa->mem_offset, offset, offset_max);

		return false;
	}

	/* Add the size of the AP portion (if defined) to the combined size */
	mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
	if (mem)
		size += mem->size;

	/* Make sure the combined size fits in the IPA command */
	if (size > size_max) {
		dev_err(dev, "header table region size too large\n");
		dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);

		return false;
	}

	return true;
}

/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
						const char *name, u32 offset)
{
	struct ipa_cmd_register_write *payload;
	struct device *dev = &ipa->pdev->dev;
	u32 offset_max;
	u32 bit_count;

	/* The maximum offset in a register_write immediate command depends
	 * on the version of IPA. A 16 bit offset is always supported,
	 * but starting with IPA v4.0 some additional high-order bits are
	 * allowed.
	 */
	bit_count = BITS_PER_BYTE * sizeof(payload->offset);
	if (ipa->version >= IPA_VERSION_4_0)
		bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
	BUILD_BUG_ON(bit_count > 32);
	offset_max = ~0U >> (32 - bit_count);

	/* Make sure the offset can be represented by the field(s)
	 * that holds it. Also make sure the offset is not outside
	 * the overall IPA memory range.
	 */
	if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
		dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
			name, ipa->mem_offset, offset, offset_max);
		return false;
	}

	return true;
}
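
/* Worked example (editorial, derived from the code above): the offset
 * field itself is 16 bits wide, so prior to IPA v4.0 offset_max is
 * 0xffff. For IPA v4.0+ the four OFFSET_HIGH bits raise bit_count to
 * 20, making offset_max 0xfffff.
 */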

/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
	const struct ipa_reg *reg;
	const char *name;
	u32 offset;

	/* If hashed tables are supported, ensure the hash flush register
	 * offset will fit in a register write IPA immediate command.
	 */
	if (ipa_table_hash_support(ipa)) {
		reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
		offset = ipa_reg_offset(reg);
		name = "filter/route hash flush";
		if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
			return false;
	}

	/* Each endpoint can have a status endpoint associated with it,
	 * and this is recorded in an endpoint register. If the modem
	 * crashes, we reset the status endpoint for all modem endpoints
	 * using a register write IPA immediate command. Make sure the
	 * worst case (highest endpoint number) offset of that endpoint
	 * fits in the register write command field(s) that must hold it.
	 */
	reg = ipa_reg(ipa, ENDP_STATUS);
	offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
	name = "maximal endpoint status";
	if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
		return false;

	return true;
}

int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	/* Command payloads are allocated one at a time, but a single
	 * transaction can require up to the maximum supported by the
	 * channel; treat them as if they were allocated all at once.
	 */
	return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
				       sizeof(union ipa_cmd_payload),
				       tre_max, channel->trans_tre_max);
}
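
/* A note on the sizing above (editorial, inferred from the call): the
 * pool holds up to tre_max payload entries in total, while a single
 * allocation may claim up to channel->trans_tre_max of them at once.
 */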

void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
	struct gsi_trans_info *trans_info = &channel->trans_info;
	struct device *dev = channel->gsi->dev;

	gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}

static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
	struct gsi_trans_info *trans_info;
	struct ipa_endpoint *endpoint;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

	return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}

/* If hash_size is 0, hash_offset and hash_addr are ignored. */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
			    enum ipa_cmd_opcode opcode, u16 size, u32 offset,
			    dma_addr_t addr, u16 hash_size, u32 hash_offset,
			    dma_addr_t hash_addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_hw_ip_fltrt_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u64 val;

	/* Record the non-hash table offset and size */
	offset += ipa->mem_offset;
	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

	/* The hash table offset and address are zero if its size is 0 */
	if (hash_size) {
		/* Record the hash table offset and size */
		hash_offset += ipa->mem_offset;
		val |= u64_encode_bits(hash_offset,
				       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
		val |= u64_encode_bits(hash_size,
				       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->table_init;

	/* Fill in all offsets and sizes and the non-hash table address */
	if (hash_size)
		payload->hash_rules_addr = cpu_to_le64(hash_addr);
	payload->flags = cpu_to_le64(val);
	payload->nhash_rules_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
	struct ipa_cmd_hw_hdr_init_local *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u32 flags;

	offset += ipa->mem_offset;

	/* With this command we tell the IPA where in its local memory the
	 * header tables reside. The content of the buffer provided is
	 * also written via DMA into that space. The IPA hardware owns
	 * the table, but the AP must initialize it.
	 */
	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->hdr_init_local;

	payload->hdr_table_addr = cpu_to_le64(addr);
	flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
	flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
	payload->flags = cpu_to_le32(flags);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
				u32 mask, bool clear_full)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_cmd_register_write *payload;
	union ipa_cmd_payload *cmd_payload;
	u32 opcode = IPA_CMD_REGISTER_WRITE;
	dma_addr_t payload_addr;
	u32 clear_option;
	u32 options;
	u16 flags;

	/* pipeline_clear_src_grp is not used */
	clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

	/* IPA v4.0+ represents the pipeline clear options in the opcode. It
	 * also supports a larger offset by encoding additional high-order
	 * bits in the payload flags field.
	 */
	if (ipa->version >= IPA_VERSION_4_0) {
		u16 offset_high;
		u32 val;

		/* Opcode encodes pipeline clear options */
		/* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
		val = u16_encode_bits(clear_option,
				      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
		opcode |= val;

		/* Extract the high 4 bits from the offset */
		offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
		offset &= (1 << 16) - 1;

		/* Encode those high-order offset bits into the flags field */
		flags = u16_encode_bits(offset_high,
					REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
		options = 0;	/* reserved */
	} else {
		flags = 0;	/* SKIP_CLEAR flag is always 0 */
		options = u16_encode_bits(clear_option,
					  REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
	}

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->register_write;

	payload->flags = cpu_to_le16(flags);
	payload->offset = cpu_to_le16((u16)offset);
	payload->value = cpu_to_le32(value);
	payload->value_mask = cpu_to_le32(mask);
	payload->clear_options = cpu_to_le32(options);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}
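
/* For example, ipa_cmd_pipeline_clear_add() below issues a no-op
 * register write (a mask of 0 means no bits are modified) purely for
 * its pipeline clear side effect:
 *
 *	ipa_cmd_register_write_add(trans, 0, 0, 0, true);
 */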

/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
	struct ipa_cmd_ip_packet_init *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_init;

	payload->dest_endpoint =
		u8_encode_bits(endpoint_id, IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
				dma_addr_t addr, bool toward_ipa)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
	struct ipa_cmd_hw_dma_mem_mem *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;
	u16 flags;

	/* size and offset must fit in 16 bit fields */
	WARN_ON(!size);
	WARN_ON(size > U16_MAX);
	WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);

	offset += ipa->mem_offset;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->dma_shared_mem;

	/* payload->clear_after_read was reserved prior to IPA v4.0. It's
	 * never needed for current code, so it's 0 regardless of version.
	 */
	payload->size = cpu_to_le16(size);
	payload->local_addr = cpu_to_le16(offset);
	/* payload->flags:
	 *   direction:		0 = write to IPA, 1 = read from IPA
	 * Starting at v4.0 these are reserved; either way, all zero:
	 *   pipeline clear:	0 = wait for pipeline clear (don't skip)
	 *   clear_options:	0 = pipeline_clear_hps
	 * Instead, for v4.0+ these are encoded in the opcode. But again
	 * since both values are 0 we won't bother OR'ing them in.
	 */
	flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
	payload->flags = cpu_to_le16(flags);
	payload->system_addr = cpu_to_le64(addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}
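
/* A minimal usage sketch (editorial; the caller shown is assumed, not
 * taken from this file): zeroing an IPA-local memory region by writing
 * a zero-filled DMA buffer into it:
 *
 *	ipa_cmd_dma_shared_mem_add(trans, mem->offset, mem->size,
 *				   zero_addr, true);
 */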

static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
	struct ipa_cmd_ip_packet_tag_status *payload;
	union ipa_cmd_payload *cmd_payload;
	dma_addr_t payload_addr;

	cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
	payload = &cmd_payload->ip_packet_tag_status;

	payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
	union ipa_cmd_payload *payload;
	dma_addr_t payload_addr;

	/* Just transfer a zero-filled payload structure */
	payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

	gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
			  opcode);
}

/* Add immediate commands to a transaction to clear the hardware pipeline */
void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
	struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
	struct ipa_endpoint *endpoint;

	/* This will complete when the transfer is received */
	reinit_completion(&ipa->completion);

	/* Issue a no-op register write command (mask 0 means no write) */
	ipa_cmd_register_write_add(trans, 0, 0, 0, true);

	/* Send a data packet through the IPA pipeline. The packet_init
	 * command says to send the next packet directly to the exception
	 * endpoint without any other IPA processing. The tag_status
	 * command requests that status be generated on completion of
	 * that transfer, and that it be tagged with a value.
	 * Finally, the transfer command sends a small packet of data
	 * (instead of a command) using the command endpoint.
	 */
	endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
	ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
	ipa_cmd_ip_tag_status_add(trans);
	ipa_cmd_transfer_add(trans);
}

/* Returns the number of commands required to clear the pipeline (one
 * each for the register write, packet init, tag status, and transfer
 * commands added by ipa_cmd_pipeline_clear_add() above)
 */
u32 ipa_cmd_pipeline_clear_count(void)
{
	return 4;
}

void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
{
	wait_for_completion(&ipa->completion);
}

/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
	struct ipa_endpoint *endpoint;

	if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
		return NULL;

	endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

	return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
				       tre_count, DMA_NONE);
}
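
/* Putting the pieces together (an illustrative sketch, not code from
 * this file; the caller shown is assumed):
 *
 *	u32 count = ipa_cmd_pipeline_clear_count();
 *	struct gsi_trans *trans;
 *
 *	trans = ipa_cmd_trans_alloc(ipa, count);
 *	if (trans) {
 *		ipa_cmd_pipeline_clear_add(trans);
 *		gsi_trans_commit_wait(trans);
 *		ipa_cmd_pipeline_clear_wait(ipa);
 *	}
 */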

/* Init function for immediate commands; there is no ipa_cmd_exit() */
int ipa_cmd_init(struct ipa *ipa)
{
	ipa_cmd_validate_build();

	if (!ipa_cmd_header_init_local_valid(ipa))
		return -EINVAL;

	if (!ipa_cmd_register_write_valid(ipa))
		return -EINVAL;

	return 0;
}