// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <linux/skbuff.h>

#include "rxe.h"
#include "rxe_loc.h"

/* check that QP matches packet opcode type and is in a valid state */
static int check_type_state(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
			    struct rxe_qp *qp)
{
	unsigned int pkt_type;

	if (unlikely(!qp->valid))
		return -EINVAL;

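	/* the top three bits of the BTH opcode encode the transport class
	 * (RC/UC/RD/UD), so masking with 0xe0 yields the IB_OPCODE_* prefix
	 * compared against the QP type below
	 */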
	pkt_type = pkt->opcode & 0xe0;

	switch (qp_type(qp)) {
	case IB_QPT_RC:
		if (unlikely(pkt_type != IB_OPCODE_RC))
			return -EINVAL;
		break;
	case IB_QPT_UC:
		if (unlikely(pkt_type != IB_OPCODE_UC))
			return -EINVAL;
		break;
	case IB_QPT_UD:
	case IB_QPT_GSI:
		if (unlikely(pkt_type != IB_OPCODE_UD))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

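	/* inbound requests are handled by the responder, which must be ready;
	 * inbound responses/ACKs are handled by the requester, which may also
	 * be in one of the drain states
	 */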
	if (pkt->mask & RXE_REQ_MASK) {
		if (unlikely(qp->resp.state != QP_STATE_READY))
			return -EINVAL;
	} else if (unlikely(qp->req.state < QP_STATE_READY ||
				qp->req.state > QP_STATE_DRAINED))
		return -EINVAL;

	return 0;
}

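/* the corresponding PortInfo fields are 16 bits wide, so both counters
 * saturate at 0xffff instead of wrapping
 */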
static void set_bad_pkey_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.bad_pkey_cntr = min((u32)0xffff,
				       port->attr.bad_pkey_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

static void set_qkey_viol_cntr(struct rxe_port *port)
{
	spin_lock_bh(&port->port_lock);
	port->attr.qkey_viol_cntr = min((u32)0xffff,
					port->attr.qkey_viol_cntr + 1);
	spin_unlock_bh(&port->port_lock);
}

static int check_keys(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      u32 qpn, struct rxe_qp *qp)
{
	struct rxe_port *port = &rxe->port;
	u16 pkey = bth_pkey(pkt);

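	/* rxe only implements the default full-membership P_Key at index 0,
	 * so any other P_Key in the BTH is counted as a bad P_Key
	 */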
	pkt->pkey_index = 0;

	if (!pkey_match(pkey, IB_DEFAULT_PKEY_FULL)) {
		set_bad_pkey_cntr(port);
		return -EINVAL;
	}

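	/* for UD/GSI QPs the Q_Key in the DETH must match; QP1 always uses
	 * the well-known GSI Q_Key
	 */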
	if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_GSI) {
		u32 qkey = (qpn == 1) ? GSI_QKEY : qp->attr.qkey;

		if (unlikely(deth_qkey(pkt) != qkey)) {
			set_qkey_viol_cntr(port);
			return -EINVAL;
		}
	}

	return 0;
}

static int check_addr(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		      struct rxe_qp *qp)
{
	struct sk_buff *skb = PKT_TO_SKB(pkt);

	if (qp_type(qp) != IB_QPT_RC && qp_type(qp) != IB_QPT_UC)
		return 0;

	if (unlikely(pkt->port_num != qp->attr.port_num))
		return -EINVAL;

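	/* for connected QPs the packet must arrive from the peer described by
	 * the primary AV: its destination IP is our SGID address and its
	 * source IP is the DGID address
	 */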
	if (skb->protocol == htons(ETH_P_IP)) {
		struct in_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in.sin_addr;
		struct in_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in.sin_addr;

		if ((ip_hdr(skb)->daddr != saddr->s_addr) ||
		    (ip_hdr(skb)->saddr != daddr->s_addr))
			return -EINVAL;

	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct in6_addr *saddr =
			&qp->pri_av.sgid_addr._sockaddr_in6.sin6_addr;
		struct in6_addr *daddr =
			&qp->pri_av.dgid_addr._sockaddr_in6.sin6_addr;

		if (memcmp(&ipv6_hdr(skb)->daddr, saddr, sizeof(*saddr)) ||
		    memcmp(&ipv6_hdr(skb)->saddr, daddr, sizeof(*daddr)))
			return -EINVAL;
	}

	return 0;
}

static int hdr_check(struct rxe_pkt_info *pkt)
{
	struct rxe_dev *rxe = pkt->rxe;
	struct rxe_port *port = &rxe->port;
	struct rxe_qp *qp = NULL;
	u32 qpn = bth_qpn(pkt);
	int index;
	int err;

	if (unlikely(bth_tver(pkt) != BTH_TVER))
		goto err1;

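	/* RoCE has no SMI, so QP0 is never a valid destination */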
	if (unlikely(qpn == 0))
		goto err1;

	if (qpn != IB_MULTICAST_QPN) {
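		/* QP1 (GSI) lives at a driver-chosen pool index; all other
		 * QPs are indexed directly by their QPN
		 */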
		index = (qpn == 1) ? port->qp_gsi_index : qpn;

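		/* rxe_pool_get_index() takes a reference on the QP; it is
		 * dropped at err2 if any of the checks below fail
		 */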
		qp = rxe_pool_get_index(&rxe->qp_pool, index);
		if (unlikely(!qp))
			goto err1;

		err = check_type_state(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_addr(rxe, pkt, qp);
		if (unlikely(err))
			goto err2;

		err = check_keys(rxe, pkt, qpn, qp);
		if (unlikely(err))
			goto err2;
	} else {
		if (unlikely((pkt->mask & RXE_GRH_MASK) == 0))
			goto err1;
	}

	pkt->qp = qp;
	return 0;

err2:
	rxe_put(qp);
err1:
	return -EINVAL;
}

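/* hand a validated packet to the responder (requests) or the completer
 * (responses and ACKs) for the destination QP
 */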
static inline void rxe_rcv_pkt(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	if (pkt->mask & RXE_REQ_MASK)
		rxe_resp_queue_pkt(pkt->qp, skb);
	else
		rxe_comp_queue_pkt(pkt->qp, skb);
}

static void rxe_rcv_mcast_pkt(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_mcg *mcg;
	struct rxe_mca *mca;
	struct rxe_qp *qp;
	union ib_gid dgid;
	int err;

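	/* recover the multicast GID from the destination IP address; IPv4
	 * destinations use the v4-mapped GID form
	 */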
	if (skb->protocol == htons(ETH_P_IP))
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
	else if (skb->protocol == htons(ETH_P_IPV6))
		memcpy(&dgid, &ipv6_hdr(skb)->daddr, sizeof(dgid));

	/* lookup mcast group corresponding to mgid, takes a ref */
	mcg = rxe_lookup_mcg(rxe, &dgid);
	if (!mcg)
		goto drop;	/* mcast group not registered */

	spin_lock_bh(&rxe->mcg_lock);

	/* multicast is an unreliable datagram service, so a failure to
	 * deliver the packet to any single QP is tolerated; just move on
	 * and try the remaining QPs on the list
	 */
	list_for_each_entry(mca, &mcg->qp_list, qp_list) {
		qp = mca->qp;

		/* validate qp for incoming packet */
		err = check_type_state(rxe, pkt, qp);
		if (err)
			continue;

		err = check_keys(rxe, pkt, bth_qpn(pkt), qp);
		if (err)
			continue;

		/* for all but the last QP create a new clone of the
		 * skb and pass to the QP. Pass the original skb to
		 * the last QP in the list.
		 */
		if (mca->qp_list.next != &mcg->qp_list) {
			struct sk_buff *cskb;
			struct rxe_pkt_info *cpkt;

			cskb = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!cskb))
				continue;

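			/* each cloned packet handed to rxe_rcv_pkt() must
			 * hold its own reference on the ib_device for as
			 * long as it is in flight
			 */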
			if (WARN_ON(!ib_device_try_get(&rxe->ib_dev))) {
				kfree_skb(cskb);
				break;
			}

			cpkt = SKB_TO_PKT(cskb);
			cpkt->qp = qp;
			rxe_get(qp);
			rxe_rcv_pkt(cpkt, cskb);
		} else {
			pkt->qp = qp;
			rxe_get(qp);
			rxe_rcv_pkt(pkt, skb);
			skb = NULL;	/* mark consumed */
		}
	}

	spin_unlock_bh(&rxe->mcg_lock);

	kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);

	if (likely(!skb))
		return;

	/* This only occurs if one of the checks fails on the last
	 * QP in the list above
	 */

drop:
	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
}

/**
 * rxe_chk_dgid - validate destination IP address
 * @rxe: rxe device that received packet
 * @skb: the received packet buffer
 *
 * Accept any loopback packet. Otherwise extract the destination IP
 * address from the packet and accept it if it is a multicast address
 * or if it matches an entry in the SGID table.
 *
 * Return: 0 if the packet is acceptable, a negative errno otherwise
 */
static int rxe_chk_dgid(struct rxe_dev *rxe, struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	const struct ib_gid_attr *gid_attr;
	union ib_gid dgid;
	union ib_gid *pdgid;

	if (pkt->mask & RXE_LOOPBACK_MASK)
		return 0;

	if (skb->protocol == htons(ETH_P_IP)) {
		ipv6_addr_set_v4mapped(ip_hdr(skb)->daddr,
				       (struct in6_addr *)&dgid);
		pdgid = &dgid;
	} else {
		pdgid = (union ib_gid *)&ipv6_hdr(skb)->daddr;
	}

	if (rdma_is_multicast_addr((struct in6_addr *)pdgid))
		return 0;

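	/* a unicast destination must match a GID in this port's table;
	 * RoCEv2 GID entries are derived from the netdev's IP addresses
	 */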
	gid_attr = rdma_find_gid_by_port(&rxe->ib_dev, pdgid,
					 IB_GID_TYPE_ROCE_UDP_ENCAP,
					 1, skb->dev);
	if (IS_ERR(gid_attr))
		return PTR_ERR(gid_attr);

	rdma_put_gid_attr(gid_attr);
	return 0;
}

/* rxe_rcv is called from the interface driver */
void rxe_rcv(struct sk_buff *skb)
{
	int err;
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);
	struct rxe_dev *rxe = pkt->rxe;

	if (unlikely(skb->len < RXE_BTH_BYTES))
		goto drop;

	if (rxe_chk_dgid(rxe, skb) < 0)
		goto drop;

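	/* decode the fixed BTH fields and fold in the per-opcode flags from
	 * the rxe_opcode[] table; header_size() below depends on the opcode
	 */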
	pkt->opcode = bth_opcode(pkt);
	pkt->psn = bth_psn(pkt);
	pkt->qp = NULL;
	pkt->mask |= rxe_opcode[pkt->opcode].mask;

	if (unlikely(skb->len < header_size(pkt)))
		goto drop;

	err = hdr_check(pkt);
	if (unlikely(err))
		goto drop;

	err = rxe_icrc_check(skb, pkt);
	if (unlikely(err))
		goto drop;

	rxe_counter_inc(rxe, RXE_CNT_RCVD_PKTS);

	if (unlikely(bth_qpn(pkt) == IB_MULTICAST_QPN))
		rxe_rcv_mcast_pkt(rxe, skb);
	else
		rxe_rcv_pkt(pkt, skb);

	return;

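	/* the reference on the ib_device taken when the skb first entered
	 * the driver is released here if the packet is dropped
	 */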
drop:
	if (pkt->qp)
		rxe_put(pkt->qp);

	kfree_skb(skb);
	ib_device_put(&rxe->ib_dev);
}