// SPDX-License-Identifier: GPL-2.0
/* Copyright Sunplus Technology Co., Ltd.
 *       All rights reserved.
 */

#include <linux/platform_device.h>
#include <linux/netdevice.h>
#include <linux/of_mdio.h>

#include "spl2sw_define.h"
#include "spl2sw_desc.h"

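/**
 * spl2sw_rx_descs_flush - Re-arm all Rx descriptors for the MAC.
 * @comm: Pointer to the driver's common data.
 *
 * Rewrite the buffer address and size of every Rx descriptor, tag the
 * last descriptor of each queue with RXD_EOR (end of ring), and hand the
 * descriptors back to the MAC by setting RXD_OWN. The write barrier
 * ensures the MAC cannot see RXD_OWN before the other fields are valid.
 */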
void spl2sw_rx_descs_flush(struct spl2sw_common *comm)
{
	struct spl2sw_skb_info *rx_skbinfo;
	struct spl2sw_mac_desc *rx_desc;
	u32 i, j;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		rx_desc = comm->rx_desc[i];
		rx_skbinfo = comm->rx_skb_info[i];
		for (j = 0; j < comm->rx_desc_num[i]; j++) {
			rx_desc[j].addr1 = rx_skbinfo[j].mapping;
			rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
					  RXD_EOR | comm->rx_desc_buff_size :
					  comm->rx_desc_buff_size;
			wmb();	/* Set RXD_OWN after other fields are ready. */
			rx_desc[j].cmd1 = RXD_OWN;
		}
	}
}

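/**
 * spl2sw_tx_descs_clean - Reclaim all Tx descriptors and pending buffers.
 * @comm: Pointer to the driver's common data.
 *
 * Take ownership back from the MAC by clearing TXD_OWN first (behind a
 * write barrier), then zero the remaining descriptor fields and unmap
 * and free any sk_buff still attached to a descriptor.
 */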
void spl2sw_tx_descs_clean(struct spl2sw_common *comm)
{
	u32 i;

	if (!comm->tx_desc)
		return;

	for (i = 0; i < TX_DESC_NUM; i++) {
		comm->tx_desc[i].cmd1 = 0;
		wmb();	/* Clear TXD_OWN and then set other fields. */
		comm->tx_desc[i].cmd2 = 0;
		comm->tx_desc[i].addr1 = 0;
		comm->tx_desc[i].addr2 = 0;

		if (comm->tx_temp_skb_info[i].mapping) {
			dma_unmap_single(&comm->pdev->dev, comm->tx_temp_skb_info[i].mapping,
					 comm->tx_temp_skb_info[i].skb->len, DMA_TO_DEVICE);
			comm->tx_temp_skb_info[i].mapping = 0;
		}

		if (comm->tx_temp_skb_info[i].skb) {
			dev_kfree_skb_any(comm->tx_temp_skb_info[i].skb);
			comm->tx_temp_skb_info[i].skb = NULL;
		}
	}
}

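/**
 * spl2sw_rx_descs_clean - Release all Rx buffers and descriptor state.
 * @comm: Pointer to the driver's common data.
 *
 * For each Rx queue, take ownership back from the MAC by clearing RXD_OWN
 * first (behind a write barrier), zero the remaining descriptor fields,
 * unmap and free every receive sk_buff, and free the per-queue skb-info
 * array. Queues whose skb-info array was never allocated are skipped, so
 * this is safe to call on partially initialized state (e.g. from the
 * error path of spl2sw_rx_descs_init()).
 */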
void spl2sw_rx_descs_clean(struct spl2sw_common *comm)
{
	struct spl2sw_skb_info *rx_skbinfo;
	struct spl2sw_mac_desc *rx_desc;
	u32 i, j;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		if (!comm->rx_skb_info[i])
			continue;

		rx_desc = comm->rx_desc[i];
		rx_skbinfo = comm->rx_skb_info[i];
		for (j = 0; j < comm->rx_desc_num[i]; j++) {
			rx_desc[j].cmd1 = 0;
			wmb();	/* Clear RXD_OWN and then set other fields. */
			rx_desc[j].cmd2 = 0;
			rx_desc[j].addr1 = 0;

			if (rx_skbinfo[j].skb) {
				dma_unmap_single(&comm->pdev->dev, rx_skbinfo[j].mapping,
						 comm->rx_desc_buff_size, DMA_FROM_DEVICE);
				dev_kfree_skb_any(rx_skbinfo[j].skb);
				rx_skbinfo[j].skb = NULL;
				rx_skbinfo[j].mapping = 0;
			}
		}

		kfree(rx_skbinfo);
		comm->rx_skb_info[i] = NULL;
	}
}

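/**
 * spl2sw_descs_clean - Clean both the Rx and Tx descriptor rings.
 * @comm: Pointer to the driver's common data.
 */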
void spl2sw_descs_clean(struct spl2sw_common *comm)
{
	spl2sw_rx_descs_clean(comm);
	spl2sw_tx_descs_clean(comm);
}

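/**
 * spl2sw_descs_free - Clean all descriptors and free the descriptor area.
 * @comm: Pointer to the driver's common data.
 *
 * Drop all references into the DMA-coherent descriptor area and then
 * release the area itself.
 */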
void spl2sw_descs_free(struct spl2sw_common *comm)
{
	u32 i;

	spl2sw_descs_clean(comm);
	comm->tx_desc = NULL;
	for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
		comm->rx_desc[i] = NULL;

	/* Free descriptor area */
	if (comm->desc_base) {
		dma_free_coherent(&comm->pdev->dev, comm->desc_size, comm->desc_base,
				  comm->desc_dma);
		comm->desc_base = NULL;
		comm->desc_dma = 0;
		comm->desc_size = 0;
	}
}

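/**
 * spl2sw_tx_descs_init - Zero the Tx descriptor ring.
 * @comm: Pointer to the driver's common data.
 *
 * Clear all Tx descriptors, including the trailing guard descriptors.
 */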
void spl2sw_tx_descs_init(struct spl2sw_common *comm)
{
	memset(comm->tx_desc, '\0', sizeof(struct spl2sw_mac_desc) *
	       (TX_DESC_NUM + MAC_GUARD_DESC_NUM));
}

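/**
 * spl2sw_rx_descs_init - Allocate Rx buffers and arm the Rx descriptors.
 * @comm: Pointer to the driver's common data.
 *
 * For each Rx queue, allocate a skb-info array and one receive sk_buff
 * per descriptor, map the buffers for DMA, program the descriptors, tag
 * the last descriptor of each queue with RXD_EOR, and hand the
 * descriptors to the MAC by setting RXD_OWN after a write barrier. On
 * any allocation or mapping failure, everything set up so far is
 * unwound via spl2sw_rx_descs_clean().
 *
 * Return: 0 on success, -ENOMEM on failure.
 */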
int spl2sw_rx_descs_init(struct spl2sw_common *comm)
{
	struct spl2sw_skb_info *rx_skbinfo;
	struct spl2sw_mac_desc *rx_desc;
	struct sk_buff *skb;
	u32 mapping;
	u32 i, j;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		comm->rx_skb_info[i] = kcalloc(comm->rx_desc_num[i], sizeof(*rx_skbinfo),
					       GFP_KERNEL | GFP_DMA);
		if (!comm->rx_skb_info[i])
			goto mem_alloc_fail;

		rx_skbinfo = comm->rx_skb_info[i];
		rx_desc = comm->rx_desc[i];
		for (j = 0; j < comm->rx_desc_num[i]; j++) {
			skb = netdev_alloc_skb(NULL, comm->rx_desc_buff_size);
			if (!skb)
				goto mem_alloc_fail;

			rx_skbinfo[j].skb = skb;
			mapping = dma_map_single(&comm->pdev->dev, skb->data,
						 comm->rx_desc_buff_size,
						 DMA_FROM_DEVICE);
			if (dma_mapping_error(&comm->pdev->dev, mapping))
				goto mem_alloc_fail;

			rx_skbinfo[j].mapping = mapping;
			rx_desc[j].addr1 = mapping;
			rx_desc[j].addr2 = 0;
			rx_desc[j].cmd2 = (j == comm->rx_desc_num[i] - 1) ?
					  RXD_EOR | comm->rx_desc_buff_size :
					  comm->rx_desc_buff_size;
			wmb();	/* Set RXD_OWN after other fields are effective. */
			rx_desc[j].cmd1 = RXD_OWN;
		}
	}

	return 0;

mem_alloc_fail:
	spl2sw_rx_descs_clean(comm);
	return -ENOMEM;
}

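/**
 * spl2sw_descs_alloc - Allocate the DMA-coherent descriptor area.
 * @comm: Pointer to the driver's common data.
 *
 * Compute the total size needed for the Tx descriptors (plus guard
 * descriptors) and all Rx queues, allocate it as a single DMA-coherent
 * block, and carve it up: Tx descriptors at the base, then the Rx queues
 * laid out back-to-back behind them.
 *
 * Return: 0 on success, -ENOMEM on failure.
 */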
int spl2sw_descs_alloc(struct spl2sw_common *comm)
{
	s32 desc_size;
	u32 i;

	/* Allocate descriptor area. */
	desc_size = (TX_DESC_NUM + MAC_GUARD_DESC_NUM) * sizeof(struct spl2sw_mac_desc);
	for (i = 0; i < RX_DESC_QUEUE_NUM; i++)
		desc_size += comm->rx_desc_num[i] * sizeof(struct spl2sw_mac_desc);

	comm->desc_base = dma_alloc_coherent(&comm->pdev->dev, desc_size, &comm->desc_dma,
					     GFP_KERNEL);
	if (!comm->desc_base)
		return -ENOMEM;

	comm->desc_size = desc_size;

	/* Set up Tx descriptors. */
	comm->tx_desc = comm->desc_base;

	/* Set up Rx descriptor queues. */
	comm->rx_desc[0] = &comm->tx_desc[TX_DESC_NUM + MAC_GUARD_DESC_NUM];
	for (i = 1; i < RX_DESC_QUEUE_NUM; i++)
		comm->rx_desc[i] = comm->rx_desc[i - 1] + comm->rx_desc_num[i - 1];

	return 0;
}

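/**
 * spl2sw_descs_init - Reset descriptor bookkeeping and set up all rings.
 * @comm: Pointer to the driver's common data.
 *
 * Reset the per-queue sizes and ring positions, allocate the descriptor
 * area, then initialize the Tx and Rx descriptor rings.
 *
 * Return: 0 on success, a negative errno on failure.
 */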
int spl2sw_descs_init(struct spl2sw_common *comm)
{
	u32 i;
	int ret;

	/* Initialize Rx descriptor bookkeeping. */
	comm->rx_desc_num[0] = RX_QUEUE0_DESC_NUM;
	comm->rx_desc_num[1] = RX_QUEUE1_DESC_NUM;

	for (i = 0; i < RX_DESC_QUEUE_NUM; i++) {
		comm->rx_desc[i] = NULL;
		comm->rx_skb_info[i] = NULL;
		comm->rx_pos[i] = 0;
	}
	comm->rx_desc_buff_size = MAC_RX_LEN_MAX;

	/* Initialize Tx descriptor bookkeeping. */
	comm->tx_done_pos = 0;
	comm->tx_desc = NULL;
	comm->tx_pos = 0;
	comm->tx_desc_full = 0;
	for (i = 0; i < TX_DESC_NUM; i++)
		comm->tx_temp_skb_info[i].skb = NULL;

	/* Allocate Tx and Rx descriptors. */
	ret = spl2sw_descs_alloc(comm);
	if (ret)
		return ret;

	spl2sw_tx_descs_init(comm);

	return spl2sw_rx_descs_init(comm);
}
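
For context, a minimal usage sketch of the lifecycle these helpers imply. This is an assumption about the callers, not part of this file: the real hook-up lives elsewhere in the driver (e.g. spl2sw_driver.c), and the function names example_probe/example_remove are purely illustrative.

/* Hypothetical probe/remove pairing, assuming the usual kernel pattern. */
static int example_probe(struct spl2sw_common *comm)
{
	int ret;

	ret = spl2sw_descs_init(comm);	/* alloc area + arm Tx/Rx rings */
	if (ret)
		return ret;
	/* ... register netdevs, request IRQs, start the MAC ... */
	return 0;
}

static void example_remove(struct spl2sw_common *comm)
{
	/* ... stop the MAC first so it no longer owns any descriptor ... */
	spl2sw_descs_free(comm);	/* clean rings + free coherent area */
}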