/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
/*! \file octeon_main.h
 *  \brief Host Driver: This file is included by all host driver source files
 *  to include common definitions.
 */

#ifndef _OCTEON_MAIN_H_
#define _OCTEON_MAIN_H_

#if BITS_PER_LONG == 32
#define CVM_CAST64(v) ((long long)(v))
#elif BITS_PER_LONG == 64
#define CVM_CAST64(v) ((long long)(long)(v))
#else
#error "Unknown system architecture"
#endif
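
/* A minimal usage sketch (editor's illustration, not from the original
 * source; the register-read helper is hypothetical): CVM_CAST64() yields a
 * value that prints portably with the "%llx"/"%lld" printk formats on both
 * 32- and 64-bit kernels:
 *
 *	u64 reg_val = example_read_csr64(oct, reg);
 *	dev_info(&oct->pci_dev->dev, "reg: 0x%016llx\n", CVM_CAST64(reg_val));
 */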

#define DRV_NAME "LiquidIO"

/** This structure is used by the NIC driver to store information required
 * to free the sk_buff when the packet has been fetched by Octeon.
 * Byte offsets below assume the worst case of a 64-bit system.
 */
struct octnet_buf_free_info {
	/** Bytes 1-8.  Pointer to network device private structure. */
	struct lio *lio;

	/** Bytes 9-16.  Pointer to sk_buff. */
	struct sk_buff *skb;

	/** Bytes 17-24.  Pointer to gather list. */
	struct octnic_gather *g;

	/** Bytes 25-32. Physical address of skb->data or gather list. */
	u64 dptr;

	/** Bytes 33-40. Piggybacked soft command, if any */
	struct octeon_soft_command *sc;
};
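
/* A minimal usage sketch (editor's illustration; the transmit path is
 * paraphrased, not copied from the driver): the structure is stashed in the
 * skb's control buffer so the completion handler can find everything it
 * must free:
 *
 *	struct octnet_buf_free_info *finfo;
 *
 *	finfo = (struct octnet_buf_free_info *)skb->cb;
 *	finfo->lio = lio;
 *	finfo->skb = skb;
 *	finfo->sc = NULL;
 */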

/* BQL-related functions */
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype);
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl);
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl);
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac);
/** Swap each of @blocks 8-byte words at @data to big-endian, in place */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}
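
/* A minimal usage sketch (editor's illustration): byte-swap a two-word
 * command buffer in place before handing it to the big-endian device:
 *
 *	u64 cmd[2] = { 0x0102030405060708ULL, 0 };
 *
 *	octeon_swap_8B_data(cmd, 2);	// cpu_to_be64 applied to each word
 */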

/**
 * \brief unmaps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 */
static inline void octeon_unmap_pci_barx(struct octeon_device *oct, int baridx)
{
	dev_dbg(&oct->pci_dev->dev, "Freeing PCI mapped regions for Bar%d\n",
		baridx);

	if (oct->mmio[baridx].done)
		iounmap(oct->mmio[baridx].hw_addr);

	if (oct->mmio[baridx].start)
		pci_release_region(oct->pci_dev, baridx * 2);
}

/**
 * \brief maps a PCI BAR
 * @param oct Pointer to Octeon device
 * @param baridx bar index
 * @param max_map_len maximum length of mapped memory
 *
 * Returns 0 on success, 1 on failure.
 */
static inline int octeon_map_pci_barx(struct octeon_device *oct,
				      int baridx, int max_map_len)
{
	u32 mapped_len = 0;

	/* The device uses 64-bit BARs, each of which occupies a pair of
	 * 32-bit BAR registers; hence PCI resource index baridx * 2.
	 */
	if (pci_request_region(oct->pci_dev, baridx * 2, DRV_NAME)) {
		dev_err(&oct->pci_dev->dev, "pci_request_region failed for bar %d\n",
			baridx);
		return 1;
	}

	oct->mmio[baridx].start = pci_resource_start(oct->pci_dev, baridx * 2);
	oct->mmio[baridx].len = pci_resource_len(oct->pci_dev, baridx * 2);

	mapped_len = oct->mmio[baridx].len;
	if (!mapped_len)
		goto err_release_region;

	if (max_map_len && (mapped_len > max_map_len))
		mapped_len = max_map_len;

	oct->mmio[baridx].hw_addr =
		ioremap(oct->mmio[baridx].start, mapped_len);
	oct->mmio[baridx].mapped_len = mapped_len;

	dev_dbg(&oct->pci_dev->dev, "BAR%d start: 0x%llx mapped %u of %u bytes\n",
		baridx, oct->mmio[baridx].start, mapped_len,
		oct->mmio[baridx].len);

	if (!oct->mmio[baridx].hw_addr) {
		dev_err(&oct->pci_dev->dev, "error ioremap for bar %d\n",
			baridx);
		goto err_release_region;
	}
	oct->mmio[baridx].done = 1;

	return 0;

err_release_region:
	pci_release_region(oct->pci_dev, baridx * 2);
	return 1;
}
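
/* A minimal usage sketch (editor's illustration): map BAR0 with no length
 * cap at probe time and release it on the error/teardown path:
 *
 *	if (octeon_map_pci_barx(oct, 0, 0))
 *		return -ENODEV;
 *	...
 *	octeon_unmap_pci_barx(oct, 0);
 */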

static inline void *
cnnic_numa_alloc_aligned_dma(u32 size,
			     u32 *alloc_size,
			     size_t *orig_ptr,
			     int numa_node)
{
	int retries = 0;
	void *ptr = NULL;

#define OCTEON_MAX_ALLOC_RETRIES     1
	do {
		struct page *page = NULL;

		page = alloc_pages_node(numa_node,
					GFP_KERNEL,
					get_order(size));
		if (!page)
			page = alloc_pages(GFP_KERNEL,
					   get_order(size));
		/* Bail out rather than dereference a failed allocation. */
		if (!page)
			break;

		ptr = (void *)page_address(page);
		if ((unsigned long)ptr & 0x07) {
			__free_pages(page, get_order(size));
			ptr = NULL;
			/* Increase the requested size if the first attempt
			 * was misaligned, so the retry has room to align.
			 */
			if (!retries)
				size += 7;
		}
		retries++;
	} while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);

	*alloc_size = size;
	*orig_ptr = (unsigned long)ptr;
	/* Round up to the next 8-byte boundary if needed. */
	if ((unsigned long)ptr & 0x07)
		ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
	return ptr;
}

#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
		free_pages(orig_ptr, get_order(size))

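/* A minimal usage sketch (editor's illustration; variable names are
 * hypothetical): allocate an 8-byte-aligned buffer near the device's NUMA
 * node, then free it via the saved original pointer:
 *
 *	u32 alloc_size;
 *	size_t orig;
 *	void *buf;
 *
 *	buf = cnnic_numa_alloc_aligned_dma(2048, &alloc_size, &orig,
 *					   dev_to_node(&oct->pci_dev->dev));
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	cnnic_free_aligned_dma(oct->pci_dev, buf, alloc_size, orig, 0);
 */
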
/* Sleep on @wait_queue until *@condition becomes true or a signal is
 * received; returns 0 once the condition is met, -EINTR on signal.
 */
static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{
	int errno = 0;
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	while (!(READ_ONCE(*condition))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			errno = -EINTR;
			goto out;
		}
		schedule();
	}
out:
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
	return errno;
}
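
/* A minimal usage sketch (editor's illustration; "resp_wq"/"resp_done" are
 * hypothetical): block until a completion flag is raised elsewhere, e.g.
 * from an interrupt path:
 *
 *	if (sleep_cond(&resp_wq, &resp_done) == -EINTR)
 *		return -EINTR;
 */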

/* Give up the CPU for up to @timeout jiffies.
 * The condition is checked once before sleeping; if it is already true,
 * no sleep takes place.
 */
static inline void
sleep_timeout_cond(wait_queue_head_t *wait_queue,
		   int *condition,
		   int timeout)
{
	wait_queue_t we;

	init_waitqueue_entry(&we, current);
	add_wait_queue(wait_queue, &we);
	set_current_state(TASK_INTERRUPTIBLE);
	if (!(*condition))
		schedule_timeout(timeout);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wait_queue, &we);
}
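
/* A minimal usage sketch (editor's illustration): wait up to 100 ms for a
 * flag, then proceed regardless of whether it was set:
 *
 *	sleep_timeout_cond(&resp_wq, &resp_done, msecs_to_jiffies(100));
 */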

#ifndef ROUNDUP4
#define ROUNDUP4(val) (((val) + 3) & 0xfffffffc)
#endif

#ifndef ROUNDUP8
#define ROUNDUP8(val) (((val) + 7) & 0xfffffff8)
#endif

#ifndef ROUNDUP16
#define ROUNDUP16(val) (((val) + 15) & 0xfffffff0)
#endif

#ifndef ROUNDUP128
#define ROUNDUP128(val) (((val) + 127) & 0xffffff80)
#endif
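
/* Worked example (editor's illustration): ROUNDUP8(13) == (13 + 7) & ~7
 * == 20 & ~7 == 16, i.e. 13 rounded up to the next multiple of 8.  Note
 * that the masks are 32-bit constants, so these macros are intended for
 * 32-bit values only.
 */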

#endif /* _OCTEON_MAIN_H_ */