/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define USE_TIMER
#define MV_XOR_SLOT_SIZE 64
#define MV_XOR_THRESHOLD 1

#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
#define XOR_OPERATION_MODE_MEMSET 4

#define XOR_CURR_DESC(chan) (chan->mmr_base + 0x210 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_base + 0x200 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan) (chan->mmr_base + 0x220 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan) (chan->mmr_base + 0x2B0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan) (chan->mmr_base + 0x2C0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_base + 0x2E0)
#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_base + 0x2E4)
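
/*
 * Editorial worked example (not part of the original header): the
 * per-channel registers above sit at 4-byte strides, so for the second
 * channel of an engine (chan->idx == 1) XOR_CURR_DESC() resolves to
 * mmr_base + 0x210 + 4 = mmr_base + 0x214, and XOR_NEXT_DESC() to
 * mmr_base + 0x204.
 */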

#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan) (chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)
#define XOR_INTR_MASK_VALUE 0x3F5

#define WINDOW_BASE(w) (0x250 + ((w) << 2))
#define WINDOW_SIZE(w) (0x270 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w) (0x290 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan) (0x240 + ((chan) << 2))

struct mv_xor_shared_private {
	void __iomem *xor_base;
	void __iomem *xor_high_base;
};


/**
 * struct mv_xor_device - internal representation of a XOR device
 * @pdev: Platform device
 * @id: HW XOR Device selector
 * @dma_desc_pool: base of DMA descriptor region (DMA address)
 * @dma_desc_pool_virt: base of DMA descriptor region (CPU address)
 * @common: embedded struct dma_device
 * @shared: pointer to the shared register mapping for the XOR engine
 */
struct mv_xor_device {
	struct platform_device *pdev;
	int id;
	dma_addr_t dma_desc_pool;
	void *dma_desc_pool_virt;
	struct dma_device common;
	struct mv_xor_shared_private *shared;
};

/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @completed_cookie: identifier for the most recently completed operation
 * @lock: serializes enqueue/dequeue operations to the descriptors pool
 * @mmr_base: memory mapped register base
 * @idx: the index of the xor channel
 * @current_type: the transaction type the channel is currently set up for
 * @chain: device chain view of the descriptors
 * @completed_slots: slots completed by HW but still need to be acked
 * @device: parent device
 * @common: common dmaengine channel object members
 * @last_used: placeholder so allocation can continue from where it left off
 * @all_slots: complete domain of slots usable by the channel
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 */
struct mv_xor_chan {
	int pending;
	dma_cookie_t completed_cookie;
	spinlock_t lock; /* protects the descriptor slot pool */
	void __iomem *mmr_base;
	unsigned int idx;
	enum dma_transaction_type current_type;
	struct list_head chain;
	struct list_head completed_slots;
	struct mv_xor_device *device;
	struct dma_chan common;
	struct mv_xor_desc_slot *last_used;
	struct list_head all_slots;
	int slots_allocated;
	struct tasklet_struct irq_tasklet;
#ifdef USE_TIMER
	unsigned long cleanup_time;
	u32 current_on_last_cleanup;
	dma_cookie_t is_complete_cookie;
#endif
};
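
/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): dmaengine callbacks receive a generic struct dma_chan *, and
 * a driver using this layout would typically recover its own channel
 * with container_of() on the embedded 'common' member. The helper name
 * below is an assumption chosen for the example.
 */
#define example_to_mv_xor_chan(chan) \
	container_of(chan, struct mv_xor_chan, common)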

/**
 * struct mv_xor_desc_slot - software descriptor
 * @slot_node: node on the mv_xor_chan.all_slots list
 * @chain_node: node on the mv_xor_chan.chain list
 * @completed_node: node on the mv_xor_chan.completed_slots list
 * @type: the dmaengine transaction type of the operation
 * @hw_desc: virtual address of the hardware descriptor chain
 * @group_head: first operation in a transaction
 * @slot_cnt: total slots used in a transaction (group of operations)
 * @slots_per_op: number of slots per operation
 * @idx: pool index
 * @unmap_src_cnt: number of xor sources
 * @value: fill pattern used by memset operations
 * @unmap_len: transaction byte count
 * @tx_list: list of slots that make up a multi-descriptor transaction
 * @async_tx: support for the async_tx api
 * @xor_check_result: result of zero sum
 * @crc32_result: result of the CRC-32 calculation
 */
struct mv_xor_desc_slot {
	struct list_head slot_node;
	struct list_head chain_node;
	struct list_head completed_node;
	enum dma_transaction_type type;
	void *hw_desc;
	struct mv_xor_desc_slot *group_head;
	u16 slot_cnt;
	u16 slots_per_op;
	u16 idx;
	u16 unmap_src_cnt;
	u32 value;
	size_t unmap_len;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
	union {
		u32 *xor_check_result;
		u32 *crc32_result;
	};
#ifdef USE_TIMER
	unsigned long arrival_time;
	struct timer_list timeout;
#endif
};

/* This structure describes an XOR hardware descriptor; it is 64 bytes in size. */
struct mv_xor_desc {
	u32 status; /* descriptor execution status */
	u32 crc32_result; /* result of CRC-32 calculation */
	u32 desc_command; /* type of operation to be carried out */
	u32 phy_next_desc; /* next descriptor address pointer */
	u32 byte_count; /* size of src/dst blocks in bytes */
	u32 phy_dest_addr; /* destination block address */
	u32 phy_src_addr[8]; /* source block addresses */
	u32 reserved0;
	u32 reserved1;
};
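
/*
 * Editorial size check (not in the original header): six u32 control and
 * status words, eight u32 source pointers and two u32 reserved words give
 * 16 * 4 = 64 bytes, matching MV_XOR_SLOT_SIZE. Assuming a C11 toolchain,
 * this could be asserted at build time:
 */
_Static_assert(sizeof(struct mv_xor_desc) == MV_XOR_SLOT_SIZE,
	       "mv_xor_desc must match the 64-byte slot size");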

#define to_mv_sw_desc(addr_hw_desc) \
	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx) \
	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))

#define MV_XOR_MIN_BYTE_COUNT (128)
#define XOR_MAX_BYTE_COUNT ((16 * 1024 * 1024) - 1)
#define MV_XOR_MAX_BYTE_COUNT XOR_MAX_BYTE_COUNT


#endif
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2007, 2008, Marvell International Ltd.
 */

#ifndef MV_XOR_H
#define MV_XOR_H

#include <linux/types.h>
#include <linux/io.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#define MV_XOR_POOL_SIZE (MV_XOR_SLOT_SIZE * 3072)
#define MV_XOR_SLOT_SIZE 64
#define MV_XOR_THRESHOLD 1
#define MV_XOR_MAX_CHANNELS 2

#define MV_XOR_MIN_BYTE_COUNT SZ_128
#define MV_XOR_MAX_BYTE_COUNT (SZ_16M - 1)

/* Values for the XOR_CONFIG register */
#define XOR_OPERATION_MODE_XOR 0
#define XOR_OPERATION_MODE_MEMCPY 2
#define XOR_OPERATION_MODE_IN_DESC 7
#define XOR_DESCRIPTOR_SWAP BIT(14)
#define XOR_DESC_SUCCESS 0x40000000

#define XOR_DESC_OPERATION_XOR (0 << 24)
#define XOR_DESC_OPERATION_CRC32C (1 << 24)
#define XOR_DESC_OPERATION_MEMCPY (2 << 24)

#define XOR_DESC_DMA_OWNED BIT(31)
#define XOR_DESC_EOD_INT_EN BIT(31)

#define XOR_CURR_DESC(chan) (chan->mmr_high_base + 0x10 + (chan->idx * 4))
#define XOR_NEXT_DESC(chan) (chan->mmr_high_base + 0x00 + (chan->idx * 4))
#define XOR_BYTE_COUNT(chan) (chan->mmr_high_base + 0x20 + (chan->idx * 4))
#define XOR_DEST_POINTER(chan) (chan->mmr_high_base + 0xB0 + (chan->idx * 4))
#define XOR_BLOCK_SIZE(chan) (chan->mmr_high_base + 0xC0 + (chan->idx * 4))
#define XOR_INIT_VALUE_LOW(chan) (chan->mmr_high_base + 0xE0)
#define XOR_INIT_VALUE_HIGH(chan) (chan->mmr_high_base + 0xE4)

#define XOR_CONFIG(chan) (chan->mmr_base + 0x10 + (chan->idx * 4))
#define XOR_ACTIVATION(chan) (chan->mmr_base + 0x20 + (chan->idx * 4))
#define XOR_INTR_CAUSE(chan) (chan->mmr_base + 0x30)
#define XOR_INTR_MASK(chan) (chan->mmr_base + 0x40)
#define XOR_ERROR_CAUSE(chan) (chan->mmr_base + 0x50)
#define XOR_ERROR_ADDR(chan) (chan->mmr_base + 0x60)

#define XOR_INT_END_OF_DESC BIT(0)
#define XOR_INT_END_OF_CHAIN BIT(1)
#define XOR_INT_STOPPED BIT(2)
#define XOR_INT_PAUSED BIT(3)
#define XOR_INT_ERR_DECODE BIT(4)
#define XOR_INT_ERR_RDPROT BIT(5)
#define XOR_INT_ERR_WRPROT BIT(6)
#define XOR_INT_ERR_OWN BIT(7)
#define XOR_INT_ERR_PAR BIT(8)
#define XOR_INT_ERR_MBUS BIT(9)

#define XOR_INTR_ERRORS (XOR_INT_ERR_DECODE | XOR_INT_ERR_RDPROT | \
			 XOR_INT_ERR_WRPROT | XOR_INT_ERR_OWN | \
			 XOR_INT_ERR_PAR | XOR_INT_ERR_MBUS)

#define XOR_INTR_MASK_VALUE (XOR_INT_END_OF_DESC | XOR_INT_END_OF_CHAIN | \
			     XOR_INT_STOPPED | XOR_INTR_ERRORS)
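
/*
 * Editorial worked value (not in the original header): the mask above
 * unmasks bits 0-2 (end of descriptor, end of chain, stopped) plus the
 * error bits 4-9, i.e. it expands to 0x3F7.
 */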

#define WINDOW_BASE(w) (0x50 + ((w) << 2))
#define WINDOW_SIZE(w) (0x70 + ((w) << 2))
#define WINDOW_REMAP_HIGH(w) (0x90 + ((w) << 2))
#define WINDOW_BAR_ENABLE(chan) (0x40 + ((chan) << 2))
#define WINDOW_OVERRIDE_CTRL(chan) (0xA0 + ((chan) << 2))

#define WINDOW_COUNT 8

struct mv_xor_device {
	void __iomem *xor_base;
	void __iomem *xor_high_base;
	struct clk *clk;
	struct mv_xor_chan *channels[MV_XOR_MAX_CHANNELS];
	int xor_type;

	u32 win_start[WINDOW_COUNT];
	u32 win_end[WINDOW_COUNT];
};

/**
 * struct mv_xor_chan - internal representation of a XOR channel
 * @pending: allows batching of hardware operations
 * @lock: serializes enqueue/dequeue operations to the descriptors pool
 * @mmr_base: memory mapped register base
 * @idx: the index of the xor channel
 * @chain: device chain view of the descriptors
 * @free_slots: free slots usable by the channel
 * @allocated_slots: slots allocated by the driver
 * @completed_slots: slots completed by HW but still need to be acked
 * @xordev: pointer to the parent XOR device
 * @dmadev: embedded struct dma_device
 * @dmachan: embedded struct dma_chan
 * @slots_allocated: records the actual size of the descriptor slot pool
 * @irq_tasklet: bottom half where mv_xor_slot_cleanup runs
 * @op_in_desc: new driver mode in which each operation is written to its descriptor
 */
struct mv_xor_chan {
	int pending;
	spinlock_t lock; /* protects the descriptor slot pool */
	void __iomem *mmr_base;
	void __iomem *mmr_high_base;
	unsigned int idx;
	int irq;
	struct list_head chain;
	struct list_head free_slots;
	struct list_head allocated_slots;
	struct list_head completed_slots;
	dma_addr_t dma_desc_pool;
	void *dma_desc_pool_virt;
	size_t pool_size;
	struct dma_device dmadev;
	struct dma_chan dmachan;
	int slots_allocated;
	struct tasklet_struct irq_tasklet;
	int op_in_desc;
	char dummy_src[MV_XOR_MIN_BYTE_COUNT];
	char dummy_dst[MV_XOR_MIN_BYTE_COUNT];
	dma_addr_t dummy_src_addr, dummy_dst_addr;
	u32 saved_config_reg, saved_int_mask_reg;

	struct mv_xor_device *xordev;
};
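
/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): the dmaengine core hands back the embedded struct dma_chan,
 * so the driver-private channel is typically recovered with
 * container_of() on the 'dmachan' member. The helper name below is an
 * assumption chosen for the example.
 */
#define example_to_mv_xor_chan(chan) \
	container_of(chan, struct mv_xor_chan, dmachan)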

/**
 * struct mv_xor_desc_slot - software descriptor
 * @node: node on the mv_xor_chan lists
 * @sg_tx_list: list of slots that make up a multi-descriptor transaction
 * @type: the dmaengine transaction type of the operation
 * @hw_desc: virtual address of the hardware descriptor chain
 * @idx: pool index
 * @async_tx: support for the async_tx api
 */
struct mv_xor_desc_slot {
	struct list_head node;
	struct list_head sg_tx_list;
	enum dma_transaction_type type;
	void *hw_desc;
	u16 idx;
	struct dma_async_tx_descriptor async_tx;
};

/*
 * This structure describes the 64-byte XOR hardware descriptor. The
 * mv_phy_src_idx() macro must be used when indexing the values of the
 * phy_src_addr[] array. This is because the 'descriptor swap' feature,
 * used on big endian systems, swaps descriptor data within blocks of
 * 8 bytes, so two consecutive entries of the phy_src_addr[] array are
 * actually swapped in big-endian mode, which explains the different
 * mv_phy_src_idx() implementation. An illustrative use of the macro
 * appears after the helper macros at the end of this header.
 */
#if defined(__LITTLE_ENDIAN)
struct mv_xor_desc {
	u32 status; /* descriptor execution status */
	u32 crc32_result; /* result of CRC-32 calculation */
	u32 desc_command; /* type of operation to be carried out */
	u32 phy_next_desc; /* next descriptor address pointer */
	u32 byte_count; /* size of src/dst blocks in bytes */
	u32 phy_dest_addr; /* destination block address */
	u32 phy_src_addr[8]; /* source block addresses */
	u32 reserved0;
	u32 reserved1;
};
#define mv_phy_src_idx(src_idx) (src_idx)
#else
struct mv_xor_desc {
	u32 crc32_result; /* result of CRC-32 calculation */
	u32 status; /* descriptor execution status */
	u32 phy_next_desc; /* next descriptor address pointer */
	u32 desc_command; /* type of operation to be carried out */
	u32 phy_dest_addr; /* destination block address */
	u32 byte_count; /* size of src/dst blocks in bytes */
	u32 phy_src_addr[8]; /* source block addresses */
	u32 reserved1;
	u32 reserved0;
};
#define mv_phy_src_idx(src_idx) (src_idx ^ 1)
#endif

#define to_mv_sw_desc(addr_hw_desc) \
	container_of(addr_hw_desc, struct mv_xor_desc_slot, hw_desc)

#define mv_hw_desc_slot_idx(hw_desc, idx) \
	((void *)(((unsigned long)hw_desc) + ((idx) << 5)))
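
/*
 * Illustrative sketch (editorial addition, not part of the original
 * header): how the descriptor-command encodings and the endian-aware
 * mv_phy_src_idx() indexing described above fit together when a single
 * memcpy descriptor is filled in. The function name and the exact order
 * of the writes are assumptions for the example only; the real
 * descriptor setup lives in the driver's .c file.
 */
static inline void example_mv_desc_setup_memcpy(struct mv_xor_desc *hw_desc,
						u32 dest, u32 src,
						u32 byte_count)
{
	hw_desc->status = XOR_DESC_DMA_OWNED; /* hand the descriptor to HW */
	hw_desc->phy_next_desc = 0; /* single-descriptor chain */
	hw_desc->desc_command = XOR_DESC_OPERATION_MEMCPY | XOR_DESC_EOD_INT_EN;
	hw_desc->phy_dest_addr = dest;
	hw_desc->byte_count = byte_count;
	/* mv_phy_src_idx() keeps the entry in the right slot on big-endian */
	hw_desc->phy_src_addr[mv_phy_src_idx(0)] = src;
}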

#endif