/*
 * Copyright (c) 2015-2016 Quantenna Communications, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/types.h>
#include <linux/io.h>

#include "shm_ipc.h"

#undef pr_fmt
#define pr_fmt(fmt)	"qtnfmac shm_ipc: %s: " fmt, __func__

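/* Check whether the peer has posted new data in the shared memory region. */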
static bool qtnf_shm_ipc_has_new_data(struct qtnf_shm_ipc *ipc)
{
	const u32 flags = readl(&ipc->shm_region->headroom.hdr.flags);

	return (flags & QTNF_SHM_IPC_NEW_DATA);
}

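/*
 * Copy one inbound packet out of the shared memory region, acknowledge it
 * to the peer and hand the data over to the registered RX callback.
 */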
static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc)
{
	size_t size;
	bool rx_buff_ok = true;
	struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;

	shm_reg_hdr = &ipc->shm_region->headroom.hdr;

	size = readw(&shm_reg_hdr->data_len);

	if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) {
		pr_err("wrong rx packet size: %zu\n", size);
		rx_buff_ok = false;
	} else {
		memcpy_fromio(ipc->rx_data, ipc->shm_region->data, size);
	}

	writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags);
	readl(&shm_reg_hdr->flags); /* flush PCIe write */

	ipc->interrupt.fn(ipc->interrupt.arg);

	if (likely(rx_buff_ok)) {
		ipc->rx_packet_count++;
		ipc->rx_callback.fn(ipc->rx_callback.arg, ipc->rx_data, size);
	}
}

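/* Work item: keep handling inbound packets while the NEW_DATA flag stays set. */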
static void qtnf_shm_ipc_irq_work(struct work_struct *work)
{
	struct qtnf_shm_ipc *ipc = container_of(work, struct qtnf_shm_ipc,
						irq_work);

	while (qtnf_shm_ipc_has_new_data(ipc))
		qtnf_shm_handle_new_data(ipc);
}

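/* Inbound IRQ handler: defer packet reception to the workqueue when new data is flagged. */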
static void qtnf_shm_ipc_irq_inbound_handler(struct qtnf_shm_ipc *ipc)
{
	u32 flags;

	flags = readl(&ipc->shm_region->headroom.hdr.flags);

	if (flags & QTNF_SHM_IPC_NEW_DATA)
		queue_work(ipc->workqueue, &ipc->irq_work);
}

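/* Outbound IRQ handler: complete a pending TX once the peer's ACK flag is seen. */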
static void qtnf_shm_ipc_irq_outbound_handler(struct qtnf_shm_ipc *ipc)
{
	u32 flags;

	if (!READ_ONCE(ipc->waiting_for_ack))
		return;

	flags = readl(&ipc->shm_region->headroom.hdr.flags);

	if (flags & QTNF_SHM_IPC_ACK) {
		WRITE_ONCE(ipc->waiting_for_ack, 0);
		complete(&ipc->tx_completion);
	}
}

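/*
 * Initialize an IPC channel: bind it to a shared memory region, a workqueue
 * and the interrupt/RX callbacks, and pick the IRQ handler matching the
 * requested direction.
 */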
int qtnf_shm_ipc_init(struct qtnf_shm_ipc *ipc,
		      enum qtnf_shm_ipc_direction direction,
		      struct qtnf_shm_ipc_region __iomem *shm_region,
		      struct workqueue_struct *workqueue,
		      const struct qtnf_shm_ipc_int *interrupt,
		      const struct qtnf_shm_ipc_rx_callback *rx_callback)
{
	BUILD_BUG_ON(offsetof(struct qtnf_shm_ipc_region, data) !=
		     QTN_IPC_REG_HDR_SZ);
	BUILD_BUG_ON(sizeof(struct qtnf_shm_ipc_region) > QTN_IPC_REG_SZ);

	ipc->shm_region = shm_region;
	ipc->direction = direction;
	ipc->interrupt = *interrupt;
	ipc->rx_callback = *rx_callback;
	ipc->tx_packet_count = 0;
	ipc->rx_packet_count = 0;
	ipc->workqueue = workqueue;
	ipc->waiting_for_ack = 0;
	ipc->tx_timeout_count = 0;

	switch (direction) {
	case QTNF_SHM_IPC_OUTBOUND:
		ipc->irq_handler = qtnf_shm_ipc_irq_outbound_handler;
		break;
	case QTNF_SHM_IPC_INBOUND:
		ipc->irq_handler = qtnf_shm_ipc_irq_inbound_handler;
		break;
	default:
		return -EINVAL;
	}

	INIT_WORK(&ipc->irq_work, qtnf_shm_ipc_irq_work);
	init_completion(&ipc->tx_completion);

	return 0;
}

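/* Release the channel: wake up any sender still waiting for a TX ACK. */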
void qtnf_shm_ipc_free(struct qtnf_shm_ipc *ipc)
{
	complete_all(&ipc->tx_completion);
}

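/*
 * Send one packet: copy it into the shared memory region, announce it via
 * the NEW_DATA flag and the peer interrupt, then wait for the peer's ACK
 * or time out.
 */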
int qtnf_shm_ipc_send(struct qtnf_shm_ipc *ipc, const u8 *buf, size_t size)
{
	int ret = 0;
	struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr;

	shm_reg_hdr = &ipc->shm_region->headroom.hdr;

	if (unlikely(size > QTN_IPC_MAX_DATA_SZ))
		return -E2BIG;

	ipc->tx_packet_count++;

	writew(size, &shm_reg_hdr->data_len);
	memcpy_toio(ipc->shm_region->data, buf, size);

	/* sync previous writes before proceeding */
	dma_wmb();

	WRITE_ONCE(ipc->waiting_for_ack, 1);

	/* sync previous memory write before announcing new data ready */
	wmb();

	writel(QTNF_SHM_IPC_NEW_DATA, &shm_reg_hdr->flags);
	readl(&shm_reg_hdr->flags); /* flush PCIe write */

	ipc->interrupt.fn(ipc->interrupt.arg);

	if (!wait_for_completion_timeout(&ipc->tx_completion,
					 QTN_SHM_IPC_ACK_TIMEOUT)) {
		ret = -ETIMEDOUT;
		ipc->tx_timeout_count++;
		pr_err("TX ACK timeout\n");
	}

	/* now we're not waiting for ACK even in case of timeout */
	WRITE_ONCE(ipc->waiting_for_ack, 0);

	return ret;
}
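
For reference, a minimal usage sketch follows; it is not part of the driver. The names my_doorbell(), my_rx() and my_probe() are hypothetical, and the callback signatures are only inferred from the call sites above (interrupt.fn(arg) and rx_callback.fn(arg, buf, len)).

/* Hypothetical glue code illustrating the expected call sequence;
 * my_doorbell(), my_rx() and my_probe() are placeholders, not part
 * of the qtnfmac driver.
 */
static void my_doorbell(void *arg)
{
	/* ring the doorbell interrupt towards the peer here */
}

static void my_rx(void *arg, const u8 *buf, size_t len)
{
	/* consume a message received from the peer here */
}

static int my_probe(struct qtnf_shm_ipc *ipc,
		    struct qtnf_shm_ipc_region __iomem *region,
		    struct workqueue_struct *wq)
{
	const struct qtnf_shm_ipc_int intr = { .fn = my_doorbell };
	const struct qtnf_shm_ipc_rx_callback rx = { .fn = my_rx };
	static const u8 msg[] = { 0x01, 0x02 };
	int ret;

	ret = qtnf_shm_ipc_init(ipc, QTNF_SHM_IPC_OUTBOUND, region, wq,
				&intr, &rx);
	if (ret)
		return ret;

	/* blocks until the peer ACKs or QTN_SHM_IPC_ACK_TIMEOUT expires */
	return qtnf_shm_ipc_send(ipc, msg, sizeof(msg));
}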