Linux Audio

Check our new training course

Buildroot integration, development and maintenance

Need a Buildroot system for your embedded project?
Loading...
Note: File does not exist in v6.2.
  1/*
  2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
  3 *
  4 *   This program is free software; you can redistribute it and/or
  5 *   modify it under the terms of the GNU General Public License
  6 *   as published by the Free Software Foundation, version 2.
  7 *
  8 *   This program is distributed in the hope that it will be useful, but
  9 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 10 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 11 *   NON INFRINGEMENT.  See the GNU General Public License for
 12 *   more details.
 13 */
 14
 15/*
 16 * Implementation of mpipe gxio calls.
 17 */
 18
 19#include <linux/errno.h>
 20#include <linux/io.h>
 21#include <linux/module.h>
 22
 23#include <gxio/iorpc_globals.h>
 24#include <gxio/iorpc_mpipe.h>
 25#include <gxio/iorpc_mpipe_info.h>
 26#include <gxio/kiorpc.h>
 27#include <gxio/mpipe.h>
 28
 29/* HACK: Avoid pointless "shadow" warnings. */
 30#define link link_shadow
 31
 32int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
 33{
 34	char file[32];
 35
 36	int fd;
 37	int i;
 38
 39	if (mpipe_index >= GXIO_MPIPE_INSTANCE_MAX)
 40		return -EINVAL;
 41
 42	snprintf(file, sizeof(file), "mpipe/%d/iorpc", mpipe_index);
 43	fd = hv_dev_open((HV_VirtAddr) file, 0);
 44
 45	context->fd = fd;
 46
 47	if (fd < 0) {
 48		if (fd >= GXIO_ERR_MIN && fd <= GXIO_ERR_MAX)
 49			return fd;
 50		else
 51			return -ENODEV;
 52	}
 53
 54	/* Map in the MMIO space. */
 55	context->mmio_cfg_base = (void __force *)
 56		iorpc_ioremap(fd, HV_MPIPE_CONFIG_MMIO_OFFSET,
 57			      HV_MPIPE_CONFIG_MMIO_SIZE);
 58	if (context->mmio_cfg_base == NULL)
 59		goto cfg_failed;
 60
 61	context->mmio_fast_base = (void __force *)
 62		iorpc_ioremap(fd, HV_MPIPE_FAST_MMIO_OFFSET,
 63			      HV_MPIPE_FAST_MMIO_SIZE);
 64	if (context->mmio_fast_base == NULL)
 65		goto fast_failed;
 66
 67	/* Initialize the stacks. */
 68	for (i = 0; i < 8; i++)
 69		context->__stacks.stacks[i] = 255;
 70
 71	context->instance = mpipe_index;
 72
 73	return 0;
 74
 75      fast_failed:
 76	iounmap((void __force __iomem *)(context->mmio_cfg_base));
 77      cfg_failed:
 78	hv_dev_close(context->fd);
 79	context->fd = -1;
 80	return -ENODEV;
 81}
 82
 83EXPORT_SYMBOL_GPL(gxio_mpipe_init);
 84
 85int gxio_mpipe_destroy(gxio_mpipe_context_t *context)
 86{
 87	iounmap((void __force __iomem *)(context->mmio_cfg_base));
 88	iounmap((void __force __iomem *)(context->mmio_fast_base));
 89	return hv_dev_close(context->fd);
 90}
 91
 92EXPORT_SYMBOL_GPL(gxio_mpipe_destroy);
 93
/*
 * Supported buffer sizes in bytes, in increasing order, indexed by
 * gxio_mpipe_buffer_size_enum_t (see the two conversion helpers below).
 */
static int16_t gxio_mpipe_buffer_sizes[8] =
	{ 128, 256, 512, 1024, 1664, 4096, 10368, 16384 };
 96
 97gxio_mpipe_buffer_size_enum_t gxio_mpipe_buffer_size_to_buffer_size_enum(size_t
 98									 size)
 99{
100	int i;
101	for (i = 0; i < 7; i++)
102		if (size <= gxio_mpipe_buffer_sizes[i])
103			break;
104	return i;
105}
106
107EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_to_buffer_size_enum);
108
109size_t gxio_mpipe_buffer_size_enum_to_buffer_size(gxio_mpipe_buffer_size_enum_t
110						  buffer_size_enum)
111{
112	if (buffer_size_enum > 7)
113		buffer_size_enum = 7;
114
115	return gxio_mpipe_buffer_sizes[buffer_size_enum];
116}
117
118EXPORT_SYMBOL_GPL(gxio_mpipe_buffer_size_enum_to_buffer_size);
119
/*
 * Compute the number of bytes of stack memory needed to hold
 * "buffers" buffer pointers, rounded up to whole cache lines.
 */
size_t gxio_mpipe_calc_buffer_stack_bytes(unsigned long buffers)
{
	const int BUFFERS_PER_LINE = 12;

	/* Round up to a whole number of cache lines. */
	unsigned long num_lines =
		(buffers + BUFFERS_PER_LINE - 1) / BUFFERS_PER_LINE;

	/* Scale by the L2 cache line size. */
	return num_lines * CHIP_L2_LINE_SIZE();
}

EXPORT_SYMBOL_GPL(gxio_mpipe_calc_buffer_stack_bytes);
133
134int gxio_mpipe_init_buffer_stack(gxio_mpipe_context_t *context,
135				 unsigned int stack,
136				 gxio_mpipe_buffer_size_enum_t
137				 buffer_size_enum, void *mem, size_t mem_size,
138				 unsigned int mem_flags)
139{
140	int result;
141
142	memset(mem, 0, mem_size);
143
144	result = gxio_mpipe_init_buffer_stack_aux(context, mem, mem_size,
145						  mem_flags, stack,
146						  buffer_size_enum);
147	if (result < 0)
148		return result;
149
150	/* Save the stack. */
151	context->__stacks.stacks[buffer_size_enum] = stack;
152
153	return 0;
154}
155
156EXPORT_SYMBOL_GPL(gxio_mpipe_init_buffer_stack);
157
158int gxio_mpipe_init_notif_ring(gxio_mpipe_context_t *context,
159			       unsigned int ring,
160			       void *mem, size_t mem_size,
161			       unsigned int mem_flags)
162{
163	return gxio_mpipe_init_notif_ring_aux(context, mem, mem_size,
164					      mem_flags, ring);
165}
166
167EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_ring);
168
169int gxio_mpipe_init_notif_group_and_buckets(gxio_mpipe_context_t *context,
170					    unsigned int group,
171					    unsigned int ring,
172					    unsigned int num_rings,
173					    unsigned int bucket,
174					    unsigned int num_buckets,
175					    gxio_mpipe_bucket_mode_t mode)
176{
177	int i;
178	int result;
179
180	gxio_mpipe_bucket_info_t bucket_info = { {
181						  .group = group,
182						  .mode = mode,
183						  }
184	};
185
186	gxio_mpipe_notif_group_bits_t bits = { {0} };
187
188	for (i = 0; i < num_rings; i++)
189		gxio_mpipe_notif_group_add_ring(&bits, ring + i);
190
191	result = gxio_mpipe_init_notif_group(context, group, bits);
192	if (result != 0)
193		return result;
194
195	for (i = 0; i < num_buckets; i++) {
196		bucket_info.notifring = ring + (i % num_rings);
197
198		result = gxio_mpipe_init_bucket(context, bucket + i,
199						bucket_info);
200		if (result != 0)
201			return result;
202	}
203
204	return 0;
205}
206
207EXPORT_SYMBOL_GPL(gxio_mpipe_init_notif_group_and_buckets);
208
209int gxio_mpipe_init_edma_ring(gxio_mpipe_context_t *context,
210			      unsigned int ring, unsigned int channel,
211			      void *mem, size_t mem_size,
212			      unsigned int mem_flags)
213{
214	memset(mem, 0, mem_size);
215
216	return gxio_mpipe_init_edma_ring_aux(context, mem, mem_size, mem_flags,
217					     ring, channel);
218}
219
220EXPORT_SYMBOL_GPL(gxio_mpipe_init_edma_ring);
221
222void gxio_mpipe_rules_init(gxio_mpipe_rules_t *rules,
223			   gxio_mpipe_context_t *context)
224{
225	rules->context = context;
226	memset(&rules->list, 0, sizeof(rules->list));
227}
228
229EXPORT_SYMBOL_GPL(gxio_mpipe_rules_init);
230
/*
 * Start a new rule in the rules list, directing matching packets to
 * "num_buckets" buckets (must be a power of two) starting at "bucket",
 * using the buffer stacks in "stacks", or the context's default stacks
 * when "stacks" is NULL.  Returns 0 or a GXIO_MPIPE_ERR_RULES_* code.
 */
int gxio_mpipe_rules_begin(gxio_mpipe_rules_t *rules,
			   unsigned int bucket, unsigned int num_buckets,
			   gxio_mpipe_rules_stacks_t *stacks)
{
	int i;
	int stack = 255;	/* 255 means "no stack assigned". */

	gxio_mpipe_rules_list_t *list = &rules->list;

	/* Current rule (the previous, still-open one, at list->head). */
	gxio_mpipe_rules_rule_t *rule =
		(gxio_mpipe_rules_rule_t *) (list->rules + list->head);

	/* The new rule starts where the list currently ends. */
	unsigned int head = list->tail;

	/*
	 * Align next rule properly.
	 * Note that "dmacs_and_vlans" will also be aligned.
	 */
	unsigned int pad = 0;
	while (((head + pad) % __alignof__(gxio_mpipe_rules_rule_t)) != 0)
		pad++;

	/*
	 * Verify room.
	 * ISSUE: Mark rules as broken on error?
	 */
	if (head + pad + sizeof(*rule) >= sizeof(list->rules))
		return GXIO_MPIPE_ERR_RULES_FULL;

	/* Verify num_buckets is a power of 2. */
	if (__builtin_popcount(num_buckets) != 1)
		return GXIO_MPIPE_ERR_RULES_INVALID;

	/*
	 * Add padding to previous rule.  Harmless when the list is still
	 * empty: rules_init() zeroed the list, so rule->size is 0 here.
	 */
	rule->size += pad;

	/* Start a new rule. */
	list->head = head + pad;

	rule = (gxio_mpipe_rules_rule_t *) (list->rules + list->head);

	/* Default some values. */
	rule->headroom = 2;
	rule->tailroom = 0;
	rule->capacity = 16384;

	/* Save the bucket info. */
	rule->bucket_mask = num_buckets - 1;
	rule->bucket_first = bucket;

	/*
	 * Walk buffer sizes from largest to smallest, carrying the most
	 * recently seen real stack down into unset (255) entries, so each
	 * size falls back to the stack of a larger configured size.
	 */
	for (i = 8 - 1; i >= 0; i--) {
		int maybe =
			stacks ? stacks->stacks[i] : rules->context->__stacks.
			stacks[i];
		if (maybe != 255)
			stack = maybe;
		rule->stacks.stacks[i] = stack;
	}

	/* No stack configured at all: the rule cannot deliver buffers. */
	if (stack == 255)
		return GXIO_MPIPE_ERR_RULES_INVALID;

	/* NOTE: Only entries at the end of the array can be 255. */
	for (i = 8 - 1; i > 0; i--) {
		if (rule->stacks.stacks[i] == 255) {
			/*
			 * Backfill trailing unset entries with the carried
			 * stack and cap the rule's capacity at the largest
			 * buffer size that actually has a stack.
			 * NOTE(review): capacity ends up set by the lowest
			 * such index (loop runs downward) -- confirm intent.
			 */
			rule->stacks.stacks[i] = stack;
			rule->capacity =
				gxio_mpipe_buffer_size_enum_to_buffer_size(i -
									   1);
		}
	}

	rule->size = sizeof(*rule);
	list->tail = list->head + rule->size;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_rules_begin);
311
312int gxio_mpipe_rules_add_channel(gxio_mpipe_rules_t *rules,
313				 unsigned int channel)
314{
315	gxio_mpipe_rules_list_t *list = &rules->list;
316
317	gxio_mpipe_rules_rule_t *rule =
318		(gxio_mpipe_rules_rule_t *) (list->rules + list->head);
319
320	/* Verify channel. */
321	if (channel >= 32)
322		return GXIO_MPIPE_ERR_RULES_INVALID;
323
324	/* Verify begun. */
325	if (list->tail == 0)
326		return GXIO_MPIPE_ERR_RULES_EMPTY;
327
328	rule->channel_bits |= (1UL << channel);
329
330	return 0;
331}
332
333EXPORT_SYMBOL_GPL(gxio_mpipe_rules_add_channel);
334
335int gxio_mpipe_rules_set_headroom(gxio_mpipe_rules_t *rules, uint8_t headroom)
336{
337	gxio_mpipe_rules_list_t *list = &rules->list;
338
339	gxio_mpipe_rules_rule_t *rule =
340		(gxio_mpipe_rules_rule_t *) (list->rules + list->head);
341
342	/* Verify begun. */
343	if (list->tail == 0)
344		return GXIO_MPIPE_ERR_RULES_EMPTY;
345
346	rule->headroom = headroom;
347
348	return 0;
349}
350
351EXPORT_SYMBOL_GPL(gxio_mpipe_rules_set_headroom);
352
353int gxio_mpipe_rules_commit(gxio_mpipe_rules_t *rules)
354{
355	gxio_mpipe_rules_list_t *list = &rules->list;
356	unsigned int size =
357		offsetof(gxio_mpipe_rules_list_t, rules) + list->tail;
358	return gxio_mpipe_commit_rules(rules->context, list, size);
359}
360
361EXPORT_SYMBOL_GPL(gxio_mpipe_rules_commit);
362
/*
 * Set up an ingress queue wrapper around notification ring "ring",
 * using "mem" as the descriptor array, then register the ring with
 * the hypervisor.  Returns gxio_mpipe_init_notif_ring()'s result.
 */
int gxio_mpipe_iqueue_init(gxio_mpipe_iqueue_t *iqueue,
			   gxio_mpipe_context_t *context,
			   unsigned int ring,
			   void *mem, size_t mem_size, unsigned int mem_flags)
{
	/* The init call below will verify that "mem_size" is legal. */
	unsigned int num_entries = mem_size / sizeof(gxio_mpipe_idesc_t);

	/*
	 * NOTE(review): the mask/ctz math below assumes num_entries is a
	 * power of two; presumably the aux init call enforces this via
	 * the mem_size check -- confirm.
	 */
	iqueue->context = context;
	iqueue->idescs = (gxio_mpipe_idesc_t *)mem;
	iqueue->ring = ring;
	iqueue->num_entries = num_entries;
	iqueue->mask_num_entries = num_entries - 1;
	iqueue->log2_num_entries = __builtin_ctz(num_entries);
	iqueue->head = 1;
#ifdef __BIG_ENDIAN__
	iqueue->swapped = 0;
#endif

	/* Initialize the "tail", stored at the start of the ring memory. */
	__gxio_mmio_write(mem, iqueue->head);

	return gxio_mpipe_init_notif_ring(context, ring, mem, mem_size,
					  mem_flags);
}

EXPORT_SYMBOL_GPL(gxio_mpipe_iqueue_init);
390
/*
 * Set up an egress queue wrapper around eDMA ring "ering" on channel
 * "channel", using "mem" as the descriptor array.  Registers the ring
 * with the hypervisor first, then initializes the software state.
 * Returns 0 or a negative error from gxio_mpipe_init_edma_ring().
 */
int gxio_mpipe_equeue_init(gxio_mpipe_equeue_t *equeue,
			   gxio_mpipe_context_t *context,
			   unsigned int ering,
			   unsigned int channel,
			   void *mem, unsigned int mem_size,
			   unsigned int mem_flags)
{
	/* The init call below will verify that "mem_size" is legal. */
	unsigned int num_entries = mem_size / sizeof(gxio_mpipe_edesc_t);

	/* Offset used to read number of completed commands. */
	MPIPE_EDMA_POST_REGION_ADDR_t offset;

	int result = gxio_mpipe_init_edma_ring(context, ering, channel,
					       mem, mem_size, mem_flags);
	if (result < 0)
		return result;

	memset(equeue, 0, sizeof(*equeue));

	/*
	 * Build the MMIO offset of this ring's eDMA post region.
	 * NOTE(review): the region is expressed relative to the IDMA
	 * region, which suggests mmio_fast_base points at the IDMA
	 * region base -- confirm against the MMIO map.
	 */
	offset.word = 0;
	offset.region =
		MPIPE_MMIO_ADDR__REGION_VAL_EDMA -
		MPIPE_MMIO_ADDR__REGION_VAL_IDMA;
	offset.ring = ering;

	__gxio_dma_queue_init(&equeue->dma_queue,
			      context->mmio_fast_base + offset.word,
			      num_entries);
	/* Cache ring parameters for the fast-path routines. */
	equeue->edescs = mem;
	equeue->mask_num_entries = num_entries - 1;
	equeue->log2_num_entries = __builtin_ctz(num_entries);
	equeue->context = context;
	equeue->ering = ering;
	equeue->channel = channel;

	return 0;
}

EXPORT_SYMBOL_GPL(gxio_mpipe_equeue_init);
431
432int gxio_mpipe_set_timestamp(gxio_mpipe_context_t *context,
433			     const struct timespec *ts)
434{
435	cycles_t cycles = get_cycles();
436	return gxio_mpipe_set_timestamp_aux(context, (uint64_t)ts->tv_sec,
437					    (uint64_t)ts->tv_nsec,
438					    (uint64_t)cycles);
439}
440
/*
 * Read the mPIPE timestamp into "ts", compensating for the latency of
 * the hypervisor call itself.  Returns the aux call's result.
 */
int gxio_mpipe_get_timestamp(gxio_mpipe_context_t *context,
			     struct timespec *ts)
{
	int ret;
	cycles_t cycles_prev, cycles_now, clock_rate;

	/* Cycle count before the hypervisor call, for compensation below. */
	cycles_prev = get_cycles();
	/*
	 * NOTE(review): these pointer casts assume tv_sec, tv_nsec and
	 * cycles_t are all exactly 64 bits -- confirm for this platform;
	 * a mismatch would corrupt adjacent fields.
	 */
	ret = gxio_mpipe_get_timestamp_aux(context, (uint64_t *)&ts->tv_sec,
					   (uint64_t *)&ts->tv_nsec,
					   (uint64_t *)&cycles_now);
	if (ret < 0) {
		return ret;
	}

	/*
	 * Subtract the time that elapsed between our initial cycle
	 * reading and the cycle count returned by the hypervisor,
	 * presumably aligning the result with cycles_prev.
	 */
	clock_rate = get_clock_rate();
	ts->tv_nsec -= (cycles_now - cycles_prev) * 1000000000LL / clock_rate;
	if (ts->tv_nsec < 0) {
		/* Borrow one second; assumes the deficit is under 1s. */
		ts->tv_nsec += 1000000000LL;
		ts->tv_sec -= 1;
	}
	return ret;
}
462
463int gxio_mpipe_adjust_timestamp(gxio_mpipe_context_t *context, int64_t delta)
464{
465	return gxio_mpipe_adjust_timestamp_aux(context, delta);
466}
467
/* Get our internal context used for link name access.  This context is
 * special in that it is not associated with an mPIPE service domain.
 */
static gxio_mpipe_context_t *_gxio_get_link_context(void)
{
	static gxio_mpipe_context_t context;
	/* NULL until a shim has been found; cached forever after. */
	static gxio_mpipe_context_t *contextp;
	/* Ensures the probe below runs at most once, even on failure. */
	static int tried_open = 0;
	/* Serializes concurrent first callers around the probe. */
	static DEFINE_MUTEX(mutex);

	mutex_lock(&mutex);

	if (!tried_open) {
		int i = 0;
		tried_open = 1;

		/*
		 * "4" here is the maximum possible number of mPIPE shims; it's
		 * an exaggeration but we shouldn't ever go beyond 2 anyway.
		 */
		for (i = 0; i < 4; i++) {
			char file[80];

			/* Probe the "info" IORPC device for shim "i". */
			snprintf(file, sizeof(file), "mpipe/%d/iorpc_info", i);
			context.fd = hv_dev_open((HV_VirtAddr) file, 0);
			if (context.fd < 0)
				continue;

			/* Found one: publish it and stop probing. */
			contextp = &context;
			break;
		}
	}

	mutex_unlock(&mutex);

	return contextp;
}
505
506int gxio_mpipe_link_instance(const char *link_name)
507{
508	_gxio_mpipe_link_name_t name;
509	gxio_mpipe_context_t *context = _gxio_get_link_context();
510
511	if (!context)
512		return GXIO_ERR_NO_DEVICE;
513
514	strncpy(name.name, link_name, sizeof(name.name));
515	name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
516
517	return gxio_mpipe_info_instance_aux(context, name);
518}
519
520int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
521{
522	int rv;
523	_gxio_mpipe_link_name_t name;
524	_gxio_mpipe_link_mac_t mac;
525
526	gxio_mpipe_context_t *context = _gxio_get_link_context();
527	if (!context)
528		return GXIO_ERR_NO_DEVICE;
529
530	rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
531	if (rv >= 0) {
532		strncpy(link_name, name.name, sizeof(name.name));
533		memcpy(link_mac, mac.mac, sizeof(mac.mac));
534	}
535
536	return rv;
537}
538
539EXPORT_SYMBOL_GPL(gxio_mpipe_link_enumerate_mac);
540
541int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
542			 gxio_mpipe_context_t *context, const char *link_name,
543			 unsigned int flags)
544{
545	_gxio_mpipe_link_name_t name;
546	int rv;
547
548	strncpy(name.name, link_name, sizeof(name.name));
549	name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
550
551	rv = gxio_mpipe_link_open_aux(context, name, flags);
552	if (rv < 0)
553		return rv;
554
555	link->context = context;
556	link->channel = rv >> 8;
557	link->mac = rv & 0xFF;
558
559	return 0;
560}
561
562EXPORT_SYMBOL_GPL(gxio_mpipe_link_open);
563
564int gxio_mpipe_link_close(gxio_mpipe_link_t *link)
565{
566	return gxio_mpipe_link_close_aux(link->context, link->mac);
567}
568
569EXPORT_SYMBOL_GPL(gxio_mpipe_link_close);
570
571int gxio_mpipe_link_set_attr(gxio_mpipe_link_t *link, uint32_t attr,
572			     int64_t val)
573{
574	return gxio_mpipe_link_set_attr_aux(link->context, link->mac, attr,
575					    val);
576}
577
578EXPORT_SYMBOL_GPL(gxio_mpipe_link_set_attr);