// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Intel SCIF driver.
 */
#include <linux/circ_buf.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/errno.h>

#include "scif_rb.h"

#define scif_rb_ring_cnt(head, tail, size) CIRC_CNT(head, tail, size)
#define scif_rb_ring_space(head, tail, size) CIRC_SPACE(head, tail, size)

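/*
 * CIRC_CNT()/CIRC_SPACE() from <linux/circ_buf.h> always leave one byte
 * free so that a full ring can be distinguished from an empty one.
 * Worked example: with size = 8, head = 6 and tail = 2,
 *
 *      scif_rb_ring_cnt(6, 2, 8)   == ((6 - 2) & 7) == 4 bytes readable
 *      scif_rb_ring_space(6, 2, 8) == ((2 - 7) & 7) == 3 bytes writable
 *
 * i.e. space == size - 1 - count; the full 8 bytes are never writable.
 */
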
/**
 * scif_rb_init - Initializes the ring buffer
 * @rb: ring buffer
 * @read_ptr: A pointer to the read offset
 * @write_ptr: A pointer to the write offset
 * @rb_base: A pointer to the base of the ring buffer
 * @size: log2 of the ring buffer size in bytes, i.e. the buffer holds
 *        (1 << size) bytes
 */
void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
                  void *rb_base, u8 size)
{
        rb->rb_base = rb_base;
        rb->size = (1 << size);
        rb->read_ptr = read_ptr;
        rb->write_ptr = write_ptr;
        rb->current_read_offset = *read_ptr;
        rb->current_write_offset = *write_ptr;
}

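/*
 * Example (illustrative sketch, not part of the driver): initializing a
 * ring buffer over local storage. In the driver proper, @rb_base and the
 * read/write offset words live in memory shared with the peer node; the
 * names below are hypothetical.
 *
 *      static u8 rb_mem[1 << 6];       // 64 bytes of ring storage
 *      static u32 rd_off, wr_off;      // offsets shared with the peer
 *      static struct scif_rb rb;
 *
 *      scif_rb_init(&rb, &rd_off, &wr_off, rb_mem, 6); // rb->size = 64
 */
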
/* Copies a message to the ring buffer -- handles the wrap around case */
static void memcpy_torb(struct scif_rb *rb, void *header,
                        void *msg, u32 size)
{
        u32 size1, size2;

        if (header + size >= rb->rb_base + rb->size) {
                /* Two copies are needed if the message wraps around */
                size1 = (u32)(rb->rb_base + rb->size - header);
                size2 = size - size1;
                memcpy_toio((void __iomem __force *)header, msg, size1);
                memcpy_toio((void __iomem __force *)rb->rb_base,
                            msg + size1, size2);
        } else {
                memcpy_toio((void __iomem __force *)header, msg, size);
        }
}

/* Copies a message from the ring buffer -- handles the wrap around case */
static void memcpy_fromrb(struct scif_rb *rb, void *header,
                          void *msg, u32 size)
{
        u32 size1, size2;

        if (header + size >= rb->rb_base + rb->size) {
                /* Two copies are needed if the message wraps around */
                size1 = (u32)(rb->rb_base + rb->size - header);
                size2 = size - size1;
                memcpy_fromio(msg, (void __iomem __force *)header, size1);
                memcpy_fromio(msg + size1,
                              (void __iomem __force *)rb->rb_base, size2);
        } else {
                memcpy_fromio(msg, (void __iomem __force *)header, size);
        }
}

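/*
 * Wrap-around worked example (illustrative): with rb->size = 64 and a
 * message of size = 8 starting at header = rb->rb_base + 60, the copy is
 * split as size1 = 4 bytes at the end of the ring followed by size2 = 4
 * bytes from rb->rb_base onwards. When header + size lands exactly on
 * rb->rb_base + rb->size, size2 is 0 and the second copy is a no-op.
 */
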
/**
 * scif_rb_space - Query space available for writing to the RB
 * @rb: ring buffer
 *
 * Return: size available for writing to RB in bytes.
 */
u32 scif_rb_space(struct scif_rb *rb)
{
        rb->current_read_offset = *rb->read_ptr;
        /*
         * Update from the HW read pointer only once the peer has exposed the
         * new empty slot. This barrier is paired with the memory barrier in
         * scif_rb_update_read_ptr().
         */
        mb();
        return scif_rb_ring_space(rb->current_write_offset,
                                  rb->current_read_offset, rb->size);
}

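/*
 * Usage sketch (illustrative, hypothetical caller): since scif_rb_write()
 * below only advances the local write offset, a caller can check space
 * once and queue several messages before publishing them all with a
 * single scif_rb_commit():
 *
 *      if (scif_rb_space(&rb) >= sizeof(hdr) + sizeof(payload)) {
 *              scif_rb_write(&rb, &hdr, sizeof(hdr));
 *              scif_rb_write(&rb, &payload, sizeof(payload));
 *              scif_rb_commit(&rb);
 *      }
 */
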
/**
 * scif_rb_write - Write a message to the RB
 * @rb: ring buffer
 * @msg: buffer to send the message.  Must be at least size bytes long
 * @size: the size (in bytes) to be copied to the RB
 *
 * This API does not block if there isn't enough space in the RB.
 *
 * Return: 0 on success or -ENOMEM on failure
 */
int scif_rb_write(struct scif_rb *rb, void *msg, u32 size)
{
        void *header;

        if (scif_rb_space(rb) < size)
                return -ENOMEM;
        header = rb->rb_base + rb->current_write_offset;
        memcpy_torb(rb, header, msg, size);
        /*
         * Only the local write offset is advanced here; the shared
         * write pointer is not updated until scif_rb_commit().
         */
        rb->current_write_offset =
                (rb->current_write_offset + size) & (rb->size - 1);
        return 0;
}

/**
 * scif_rb_commit - Commit a written message so the peer can fetch it
 * @rb: ring buffer
 */
void scif_rb_commit(struct scif_rb *rb)
{
        /*
         * We must ensure ordering between all the data committed
         * previously and the new message we expose to the peer by
         * updating the write_ptr. This write barrier is paired with
         * the read barrier in scif_rb_count(..)
         */
        wmb();
        WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#ifdef CONFIG_INTEL_MIC_CARD
        /*
         * X100 Si bug: For the case where a Core is performing an EXT_WR
         * followed by a Doorbell Write, the Core must perform two EXT_WR to the
         * same address with the same data before it does the Doorbell Write.
         * This way, if ordering is violated for the Interrupt Message, it will
         * fall just behind the first Posted associated with the first EXT_WR.
         */
        WRITE_ONCE(*rb->write_ptr, rb->current_write_offset);
#endif
}

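/*
 * Producer-side sketch (illustrative; "struct scifmsg" here is just a
 * stand-in for whatever fixed-size message the caller uses): the
 * non-blocking write/commit pair is typically driven as
 *
 *      err = scif_rb_write(&rb, &msg, sizeof(msg));
 *      if (!err)
 *              scif_rb_commit(&rb);    // expose the message to the peer
 *      // else -ENOMEM: ring full, retry later
 */
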
/**
 * scif_rb_get - Get a pointer to the next message in the ring buffer
 * @rb: ring buffer
 * @size: Number of bytes to be read
 *
 * Return: NULL if fewer than @size bytes are available in the ring buffer,
 *	otherwise a pointer to the start of the next message
 */
static void *scif_rb_get(struct scif_rb *rb, u32 size)
{
        void *header = NULL;

        if (scif_rb_count(rb, size) >= size)
                header = rb->rb_base + rb->current_read_offset;
        return header;
}

/**
 * scif_rb_get_next - Read the next message from the ring buffer
 * @rb: ring buffer
 * @msg: buffer to hold the message.  Must be at least size bytes long
 * @size: Number of bytes to be read
 *
 * Return: @size if at least @size bytes are available, otherwise zero.
 */
u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size)
{
        void *header = NULL;
        u32 read_size = 0;

        header = scif_rb_get(rb, size);
        if (header) {
                u32 next_cmd_offset =
                        (rb->current_read_offset + size) & (rb->size - 1);

                read_size = size;
                rb->current_read_offset = next_cmd_offset;
                memcpy_fromrb(rb, header, msg, size);
        }
        return read_size;
}

/**
 * scif_rb_update_read_ptr - Publish the local read offset so the peer
 *	can reuse the space freed up by the reads performed so far
 * @rb: ring buffer
 */
void scif_rb_update_read_ptr(struct scif_rb *rb)
{
        u32 new_offset;

        new_offset = rb->current_read_offset;
        /*
         * We must ensure ordering between all the data committed or read
         * previously and the empty slot we expose to the peer by updating
         * the read_ptr. This barrier is paired with the memory barrier in
         * scif_rb_space(..)
         */
        mb();
        WRITE_ONCE(*rb->read_ptr, new_offset);
#ifdef CONFIG_INTEL_MIC_CARD
        /*
         * X100 Si Bug: For the case where a Core is performing an EXT_WR
         * followed by a Doorbell Write, the Core must perform two EXT_WR to the
         * same address with the same data before it does the Doorbell Write.
         * This way, if ordering is violated for the Interrupt Message, it will
         * fall just behind the first Posted associated with the first EXT_WR.
         */
        WRITE_ONCE(*rb->read_ptr, new_offset);
#endif
}

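/*
 * Consumer-side sketch (illustrative, hypothetical fixed-size messages):
 * drain the ring first, then publish the freed space back to the peer in
 * one go:
 *
 *      struct scifmsg msg;
 *
 *      while (scif_rb_get_next(&rb, &msg, sizeof(msg)) == sizeof(msg))
 *              ;                       // process msg
 *      scif_rb_update_read_ptr(&rb);   // let the peer reuse the space
 */
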
/**
 * scif_rb_count - Query the number of bytes available for reading
 * @rb: ring buffer
 * @size: Number of bytes expected to be read
 *
 * Return: number of bytes that can be read from the RB
 */
u32 scif_rb_count(struct scif_rb *rb, u32 size)
{
        if (scif_rb_ring_cnt(rb->current_write_offset,
                             rb->current_read_offset,
                             rb->size) < size) {
                rb->current_write_offset = *rb->write_ptr;
                /*
                 * Refresh from the HW write pointer only when the cached
                 * count is insufficient, i.e. once the peer has exposed a
                 * new message. This read barrier is paired with the write
                 * barrier in scif_rb_commit(..)
                 */
                smp_rmb();
        }
        return scif_rb_ring_cnt(rb->current_write_offset,
                                rb->current_read_offset,
                                rb->size);
}
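
/*
 * Note: because scif_rb_count() works off the cached offsets and re-reads
 * the shared write pointer only when the cached view holds fewer than
 * @size bytes, polling it is cheap. Illustrative busy-wait (hypothetical
 * caller; real code would sleep or use an interrupt instead):
 *
 *      while (scif_rb_count(&rb, sizeof(msg)) < sizeof(msg))
 *              cpu_relax();
 */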