// SPDX-License-Identifier: GPL-2.0+
/* Microchip Sparx5 Switch driver
 *
 * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries.
 */

#include "sparx5_main_regs.h"
#include "sparx5_main.h"

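/* Control words interleaved with the frame data in the extraction FIFO.
 * Applying ntohl() to the constants keeps the comparisons in
 * sparx5_xtr_grp() independent of host endianness.
 */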
#define XTR_EOF_0     ntohl((__force __be32)0x80000000u)
#define XTR_EOF_1     ntohl((__force __be32)0x80000001u)
#define XTR_EOF_2     ntohl((__force __be32)0x80000002u)
#define XTR_EOF_3     ntohl((__force __be32)0x80000003u)
#define XTR_PRUNED    ntohl((__force __be32)0x80000004u)
#define XTR_ABORT     ntohl((__force __be32)0x80000005u)
#define XTR_ESCAPE    ntohl((__force __be32)0x80000006u)
#define XTR_NOT_READY ntohl((__force __be32)0x80000007u)

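/* The two LSBs of an EOF control word encode how many bytes of the last
 * data word are valid.
 */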
#define XTR_VALID_BYTES(x)      (4 - ((x) & 3))

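/* Delay before retrying injection after the FIFO watermark has been
 * reached (see sparx5_injection_timeout())
 */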
#define INJ_TIMEOUT_NS 50000

void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp)
{
	/* Start flush */
	spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH);

	/* Allow to drain */
	mdelay(1);

	/* All Queues normal */
	spx5_wr(0, sparx5, QS_XTR_FLUSH);
}

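/* Decode the fields of interest from the internal frame header (IFH)
 * that precedes each extracted frame. The header is read byte-wise, so
 * the decoding is independent of host endianness.
 */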
void sparx5_ifh_parse(u32 *ifh, struct frame_info *info)
{
	u8 *xtr_hdr = (u8 *)ifh;

	/* FWD is bit 45-72 (28 bits), but we only read the 27 LSB for now */
	u32 fwd =
		((u32)xtr_hdr[27] << 24) |
		((u32)xtr_hdr[28] << 16) |
		((u32)xtr_hdr[29] <<  8) |
		((u32)xtr_hdr[30] <<  0);
	fwd = (fwd >> 5);
	info->src_port = FIELD_GET(GENMASK(7, 1), fwd);

	info->timestamp =
		((u64)xtr_hdr[2] << 24) |
		((u64)xtr_hdr[3] << 16) |
		((u64)xtr_hdr[4] <<  8) |
		((u64)xtr_hdr[5] <<  0);
}

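/* Manually extract one frame from extraction group @grp: read the IFH,
 * then copy data words from the QS_XTR_RD register until an EOF or
 * ABORT control word is seen.
 */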
static void sparx5_xtr_grp(struct sparx5 *sparx5, u8 grp, bool byte_swap)
{
	bool eof_flag = false, pruned_flag = false, abort_flag = false;
	struct net_device *netdev;
	struct sparx5_port *port;
	struct frame_info fi;
	int i, byte_cnt = 0;
	struct sk_buff *skb;
	u32 ifh[IFH_LEN];
	u32 *rxbuf;

	/* Get IFH */
	for (i = 0; i < IFH_LEN; i++)
		ifh[i] = spx5_rd(sparx5, QS_XTR_RD(grp));

	/* Decode IFH (what's needed) */
	sparx5_ifh_parse(ifh, &fi);

	/* Map to port netdev */
	port = fi.src_port < SPX5_PORTS ?
		sparx5->ports[fi.src_port] : NULL;
	if (!port || !port->ndev) {
		dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port);
		sparx5_xtr_flush(sparx5, grp);
		return;
	}

	/* Have netdev, get skb */
	netdev = port->ndev;
	skb = netdev_alloc_skb(netdev, netdev->mtu + ETH_HLEN);
	if (!skb) {
		sparx5_xtr_flush(sparx5, grp);
		dev_err(sparx5->dev, "No skb allocated\n");
		netdev->stats.rx_dropped++;
		return;
	}
	rxbuf = (u32 *)skb->data;

	/* Now, pull frame data */
	while (!eof_flag) {
		u32 val = spx5_rd(sparx5, QS_XTR_RD(grp));
		u32 cmp = val;

		if (byte_swap)
			cmp = ntohl((__force __be32)val);

		switch (cmp) {
		case XTR_NOT_READY:
			break;
		case XTR_ABORT:
			/* No accompanying data */
			abort_flag = true;
			eof_flag = true;
			break;
		case XTR_EOF_0:
		case XTR_EOF_1:
		case XTR_EOF_2:
		case XTR_EOF_3:
			/* This assumes STATUS_WORD_POS == 1, Status
			 * just after last data
			 */
			if (!byte_swap)
				val = ntohl((__force __be32)val);
			byte_cnt -= (4 - XTR_VALID_BYTES(val));
			eof_flag = true;
			break;
		case XTR_PRUNED:
			/* But get the last 4 bytes as well */
			eof_flag = true;
			pruned_flag = true;
			fallthrough;
		case XTR_ESCAPE:
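			/* The next word is frame data that happens to
			 * collide with a control word and must be read
			 * verbatim
			 */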
			*rxbuf = spx5_rd(sparx5, QS_XTR_RD(grp));
			byte_cnt += 4;
			rxbuf++;
			break;
		default:
			*rxbuf = val;
			byte_cnt += 4;
			rxbuf++;
		}
	}

	if (abort_flag || pruned_flag || !eof_flag) {
		netdev_err(netdev, "Discarded frame: abort:%d pruned:%d eof:%d\n",
			   abort_flag, pruned_flag, eof_flag);
		kfree_skb(skb);
		netdev->stats.rx_dropped++;
		return;
	}

	/* Everything we see on an interface that is in the HW bridge
	 * has already been forwarded
	 */
	if (test_bit(port->portno, sparx5->bridge_mask))
		skb->offload_fwd_mark = 1;

	/* Finish up skb */
	skb_put(skb, byte_cnt - ETH_FCS_LEN);
	eth_skb_pad(skb);
	sparx5_ptp_rxtstamp(sparx5, skb, fi.timestamp);
	skb->protocol = eth_type_trans(skb, netdev);
	netdev->stats.rx_bytes += skb->len;
	netdev->stats.rx_packets++;
	netif_rx(skb);
}

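/* Manually inject one frame on injection group INJ_QUEUE: signal SOF,
 * write the IFH and the frame data word by word, pad to the minimum
 * frame size, then signal EOF together with the number of valid bytes
 * in the last data word.
 */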
static int sparx5_inject(struct sparx5 *sparx5,
			 u32 *ifh,
			 struct sk_buff *skb,
			 struct net_device *ndev)
{
	int grp = INJ_QUEUE;
	u32 val, w, count;
	u8 *buf;

	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (!(QS_INJ_STATUS_FIFO_RDY_GET(val) & BIT(grp))) {
		pr_err_ratelimited("Injection: Queue not ready: 0x%lx\n",
				   QS_INJ_STATUS_FIFO_RDY_GET(val));
		return -EBUSY;
	}

	/* Indicate SOF */
	spx5_wr(QS_INJ_CTRL_SOF_SET(1) |
		QS_INJ_CTRL_GAP_SIZE_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Write the IFH to the chip */
	for (w = 0; w < IFH_LEN; w++)
		spx5_wr(ifh[w], sparx5, QS_INJ_WR(grp));

	/* Write frame data words, rounding the length up */
	count = DIV_ROUND_UP(skb->len, 4);
	buf = skb->data;
	for (w = 0; w < count; w++, buf += 4) {
		val = get_unaligned((const u32 *)buf);
		spx5_wr(val, sparx5, QS_INJ_WR(grp));
	}

	/* Pad to the 60-byte minimum Ethernet frame size (excluding FCS) */
	while (w < (60 / 4)) {
		spx5_wr(0, sparx5, QS_INJ_WR(grp));
		w++;
	}

	/* Indicate EOF and valid bytes in last word */
	spx5_wr(QS_INJ_CTRL_GAP_SIZE_SET(1) |
		QS_INJ_CTRL_VLD_BYTES_SET(skb->len < 60 ? 0 : skb->len % 4) |
		QS_INJ_CTRL_EOF_SET(1),
		sparx5, QS_INJ_CTRL(grp));

	/* Add dummy CRC */
	spx5_wr(0, sparx5, QS_INJ_WR(grp));
	w++;

	val = spx5_rd(sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		struct sparx5_port *port = netdev_priv(ndev);

		pr_err_ratelimited("Injection: Watermark reached: 0x%lx\n",
				   QS_INJ_STATUS_WMARK_REACHED_GET(val));
		netif_stop_queue(ndev);
		hrtimer_start(&port->inj_timer, INJ_TIMEOUT_NS,
			      HRTIMER_MODE_REL);
	}

	return NETDEV_TX_OK;
}

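/* Transmit entry point: build the IFH for the egress port and hand the
 * frame to either the FDMA engine (when its IRQ is in use) or to
 * register-based manual injection.
 */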
netdev_tx_t sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sparx5_port *port = netdev_priv(dev);
	struct sparx5 *sparx5 = port->sparx5;
	u32 ifh[IFH_LEN];
	netdev_tx_t ret;

	memset(ifh, 0, IFH_LEN * 4);
	sparx5_set_port_ifh(ifh, port->portno);

	if (sparx5->ptp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
		if (sparx5_ptp_txtstamp_request(port, skb) < 0)
			return NETDEV_TX_BUSY;

		sparx5_set_port_ifh_rew_op(ifh, SPARX5_SKB_CB(skb)->rew_op);
		sparx5_set_port_ifh_pdu_type(ifh, SPARX5_SKB_CB(skb)->pdu_type);
		sparx5_set_port_ifh_pdu_w16_offset(ifh, SPARX5_SKB_CB(skb)->pdu_w16_offset);
		sparx5_set_port_ifh_timestamp(ifh, SPARX5_SKB_CB(skb)->ts_id);
	}

	skb_tx_timestamp(skb);
	spin_lock(&sparx5->tx_lock);
	if (sparx5->fdma_irq > 0)
		ret = sparx5_fdma_xmit(sparx5, ifh, skb);
	else
		ret = sparx5_inject(sparx5, ifh, skb, dev);
	spin_unlock(&sparx5->tx_lock);

	if (ret == -EBUSY)
		goto busy;
	if (ret < 0)
		goto drop;

	stats->tx_bytes += skb->len;
	stats->tx_packets++;
	sparx5->tx.packets++;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		return NETDEV_TX_OK;

	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;
drop:
	stats->tx_dropped++;
	sparx5->tx.dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
busy:
	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
	    SPARX5_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
		sparx5_ptp_txtstamp_release(port, skb);
	return NETDEV_TX_BUSY;
}

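/* Runs when the injection retry timer armed in sparx5_inject() fires:
 * clear the watermark count if it is still asserted and wake the
 * stopped transmit queue.
 */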
static enum hrtimer_restart sparx5_injection_timeout(struct hrtimer *tmr)
{
	struct sparx5_port *port = container_of(tmr, struct sparx5_port,
						inj_timer);
	int grp = INJ_QUEUE;
	u32 val;

	val = spx5_rd(port->sparx5, QS_INJ_STATUS);
	if (QS_INJ_STATUS_WMARK_REACHED_GET(val) & BIT(grp)) {
		pr_err_ratelimited("Injection: Reset watermark count\n");
		/* Reset Watermark count to restart */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 port->sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(port->portno));
	}
	netif_wake_queue(port->ndev);
	return HRTIMER_NORESTART;
}

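/* Switch the extraction and injection groups to manual (register-based)
 * mode and configure the ASM/DSM blocks of the CPU ports accordingly.
 */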
int sparx5_manual_injection_mode(struct sparx5 *sparx5)
{
	const int byte_swap = 1;
	int portno;

	/* Change mode to manual extraction and injection */
	spx5_wr(QS_XTR_GRP_CFG_MODE_SET(1) |
		QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) |
		QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_XTR_GRP_CFG(XTR_QUEUE));
	spx5_wr(QS_INJ_GRP_CFG_MODE_SET(1) |
		QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap),
		sparx5, QS_INJ_GRP_CFG(INJ_QUEUE));

	/* CPU ports capture setup */
	for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) {
		/* ASM CPU port: No preamble, IFH, enable padding */
		spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) |
			ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) |
			ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */
			sparx5, ASM_PORT_CFG(portno));

		/* Reset WM cnt to unclog queued frames */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Set Disassembler Stop Watermark level */
		spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(0),
			 DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM,
			 sparx5,
			 DSM_DEV_TX_STOP_WM_CFG(portno));

		/* Enable Disassembler buffer underrun watchdog */
		spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(0),
			 DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS,
			 sparx5,
			 DSM_BUF_CFG(portno));
	}
	return 0;
}

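/* Extraction IRQ handler: drain up to 64 frames per interrupt, bounding
 * the work done in a single interrupt context.
 */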
irqreturn_t sparx5_xtr_handler(int irq, void *_sparx5)
{
	struct sparx5 *s5 = _sparx5;
	int poll = 64;

	/* Check data in queue */
	while (spx5_rd(s5, QS_XTR_DATA_PRESENT) & BIT(XTR_QUEUE) && poll-- > 0)
		sparx5_xtr_grp(s5, XTR_QUEUE, false);

	return IRQ_HANDLED;
}

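/* One-time setup of the per-port injection retry timer */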
void sparx5_port_inj_timer_setup(struct sparx5_port *port)
{
	hrtimer_init(&port->inj_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	port->inj_timer.function = sparx5_injection_timeout;
}