/* Scraped web-page header removed.  The content below is
 * net/ethtool/tunnels.c as shipped in Linux v6.13.7 (first copy)
 * followed by the identical file from Linux v6.8 (second copy).
 */
  1// SPDX-License-Identifier: GPL-2.0-only
  2
  3#include <linux/ethtool_netlink.h>
  4#include <net/udp_tunnel.h>
  5#include <net/vxlan.h>
  6
  7#include "bitset.h"
  8#include "common.h"
  9#include "netlink.h"
 10
/* Request attribute policy for ETHTOOL_MSG_TUNNEL_INFO_GET: only the
 * common nested request header is accepted.
 */
const struct nla_policy ethnl_tunnel_info_get_policy[] = {
	[ETHTOOL_A_TUNNEL_INFO_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
 15
/* The ethtool UAPI reports tunnel types as bit positions while the
 * udp_tunnel core uses bitmask flags; these asserts keep the UAPI enum
 * pinned to ilog2() of the corresponding kernel flag.
 */
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
	      ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));
 20
 21static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact)
 22{
 23	ssize_t size;
 24
 25	size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
 26				   udp_tunnel_type_names, compact);
 27	if (size < 0)
 28		return size;
 29
 30	return size +
 31		nla_total_size(0) + /* _UDP_TABLE */
 32		nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */
 33}
 34
 35static ssize_t
 36ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
 37			     struct netlink_ext_ack *extack)
 38{
 39	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
 40	const struct udp_tunnel_nic_info *info;
 41	unsigned int i;
 42	ssize_t ret;
 43	size_t size;
 44
 45	info = req_base->dev->udp_tunnel_nic_info;
 46	if (!info) {
 47		NL_SET_ERR_MSG(extack,
 48			       "device does not report tunnel offload info");
 49		return -EOPNOTSUPP;
 50	}
 51
 52	size =	nla_total_size(0); /* _INFO_UDP_PORTS */
 53
 54	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
 55		if (!info->tables[i].n_entries)
 56			break;
 57
 58		ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
 59						 compact);
 60		if (ret < 0)
 61			return ret;
 62		size += ret;
 63
 64		size += udp_tunnel_nic_dump_size(req_base->dev, i);
 65	}
 66
 67	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
 68		ret = ethnl_udp_table_reply_size(0, compact);
 69		if (ret < 0)
 70			return ret;
 71		size += ret;
 72
 73		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
 74			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
 75			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
 76	}
 77
 78	return size;
 79}
 80
/* Fill the tunnel offload info into an already-prepared reply message:
 * one _INFO_UDP_PORTS nest containing a _UDP_TABLE nest per populated
 * device table.  Returns -EOPNOTSUPP when the device has no
 * udp_tunnel_nic_info, -EMSGSIZE when the skb runs out of room (every
 * partially written nest is cancelled before returning).
 */
static int
ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
			     struct sk_buff *skb)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	struct nlattr *ports, *table, *entry;
	unsigned int i;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info)
		return -EOPNOTSUPP;

	ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS);
	if (!ports)
		return -EMSGSIZE;

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		/* tables are populated from index 0; first empty one ends it */
		if (!info->tables[i].n_entries)
			break;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,
				info->tables[i].n_entries))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &info->tables[i].tunnel_types, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		/* the currently offloaded port entries come from the
		 * udp_tunnel_nic core
		 */
		if (udp_tunnel_nic_dump_write(req_base->dev, i, skb))
			goto err_cancel_table;

		nla_nest_end(skb, table);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		u32 zero = 0;

		/* report the always-offloaded IANA VXLAN port as an extra
		 * one-entry table; its type bitset is empty because no
		 * further types can be programmed into it
		 */
		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &zero, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
		if (!entry)
			/* nla_nest_cancel() on a NULL attr is a no-op, so
			 * falling through err_cancel_entry is safe here
			 */
			goto err_cancel_entry;

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 htons(IANA_VXLAN_UDP_PORT)) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(UDP_TUNNEL_TYPE_VXLAN)))
			goto err_cancel_entry;

		nla_nest_end(skb, entry);
		nla_nest_end(skb, table);
	}

	nla_nest_end(skb, ports);

	return 0;

err_cancel_entry:
	nla_nest_cancel(skb, entry);
err_cancel_table:
	nla_nest_cancel(skb, table);
err_cancel_ports:
	nla_nest_cancel(skb, ports);
	return -EMSGSIZE;
}
164
/* ETHTOOL_MSG_TUNNEL_INFO_GET doit handler: resolve the target device
 * from the request header, then size and build the reply under the RTNL
 * lock so the udp_tunnel_nic state cannot change between the two steps.
 */
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct ethnl_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	/* takes a reference on req_info.dev; dropped on every exit path */
	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();
	ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.dev,
				ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
				ETHTOOL_A_TUNNEL_INFO_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
	if (ret)
		goto err_free_msg;
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	return ret;
}
212
/* Dump iteration state stored in netlink_callback::ctx between dumpit
 * calls: req_info carries the parsed request flags, ifindex the resume
 * position in the per-netns device list.
 */
struct ethnl_tunnel_info_dump_ctx {
	struct ethnl_req_info	req_info;
	unsigned long		ifindex;
};
217
/* Dump start callback: parse and validate the request header.  A dump
 * walks every device in the netns, so a device reference taken while
 * parsing is dropped immediately; only the flags in req_info are kept.
 */
int ethnl_tunnel_info_start(struct netlink_callback *cb)
{
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct nlattr **tb = info->info.attrs;
	int ret;

	/* our state must fit the space netlink reserves for dump context */
	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	memset(ctx, 0, sizeof(*ctx));

	ret = ethnl_parse_header_dev_get(&ctx->req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	if (ctx->req_info.dev) {
		/* release the reference right away, dumpit iterates devices */
		ethnl_parse_header_dev_put(&ctx->req_info);
		ctx->req_info.dev = NULL;
	}

	return ret;
}
240
/* Dump callback: emit one TUNNEL_INFO_GET_REPLY per device, resuming
 * from ctx->ifindex.  Devices without tunnel offload info (-EOPNOTSUPP)
 * are skipped silently; -EMSGSIZE with data already queued means the
 * buffer is full and the dump continues in the next call.
 */
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int ret = 0;
	void *ehdr;

	rtnl_lock();
	for_each_netdev_dump(net, dev, ctx->ifindex) {
		ehdr = ethnl_dump_put(skb, cb,
				      ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
		if (!ehdr) {
			ret = -EMSGSIZE;
			break;
		}

		ret = ethnl_fill_reply_header(skb, dev,
					      ETHTOOL_A_TUNNEL_INFO_HEADER);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		/* fill_reply reads the device from req_info; lend it the
		 * current device for this iteration only (no ref taken)
		 */
		ctx->req_info.dev = dev;
		ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
		ctx->req_info.dev = NULL;
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			if (ret == -EOPNOTSUPP)
				continue;
			break;
		}
		genlmsg_end(skb, ehdr);
	}
	rtnl_unlock();

	if (ret == -EMSGSIZE && skb->len)
		return skb->len;
	return ret;
}
/* Second copy: net/ethtool/tunnels.c from Linux v6.8 — textually
 * identical to the v6.13.7 copy above.
 */
  1// SPDX-License-Identifier: GPL-2.0-only
  2
  3#include <linux/ethtool_netlink.h>
  4#include <net/udp_tunnel.h>
  5#include <net/vxlan.h>
  6
  7#include "bitset.h"
  8#include "common.h"
  9#include "netlink.h"
 10
/* Request attribute policy for ETHTOOL_MSG_TUNNEL_INFO_GET: only the
 * common nested request header is accepted.
 */
const struct nla_policy ethnl_tunnel_info_get_policy[] = {
	[ETHTOOL_A_TUNNEL_INFO_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};
 15
/* The ethtool UAPI reports tunnel types as bit positions while the
 * udp_tunnel core uses bitmask flags; these asserts keep the UAPI enum
 * pinned to ilog2() of the corresponding kernel flag.
 */
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN == ilog2(UDP_TUNNEL_TYPE_VXLAN));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_GENEVE == ilog2(UDP_TUNNEL_TYPE_GENEVE));
static_assert(ETHTOOL_UDP_TUNNEL_TYPE_VXLAN_GPE ==
	      ilog2(UDP_TUNNEL_TYPE_VXLAN_GPE));
 20
/* Size of one ETHTOOL_A_TUNNEL_UDP_TABLE nest: the nest header, the
 * _UDP_TABLE_SIZE attribute and the tunnel type bitset.  A negative
 * error from the bitset size helper is passed straight through.
 */
static ssize_t ethnl_udp_table_reply_size(unsigned int types, bool compact)
{
	ssize_t size;

	size = ethnl_bitset32_size(&types, NULL, __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				   udp_tunnel_type_names, compact);
	if (size < 0)
		return size;

	return size +
		nla_total_size(0) + /* _UDP_TABLE */
		nla_total_size(sizeof(u32)); /* _UDP_TABLE_SIZE */
}
 34
/* Compute the reply payload size for a TUNNEL_INFO_GET request:
 * the _INFO_UDP_PORTS nest plus one table nest (with its port entries)
 * per populated device table, and one extra synthetic table when the
 * device statically offloads the IANA VXLAN port.  Returns -EOPNOTSUPP
 * for devices that do not expose udp_tunnel_nic_info.
 */
static ssize_t
ethnl_tunnel_info_reply_size(const struct ethnl_req_info *req_base,
			     struct netlink_ext_ack *extack)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	unsigned int i;
	ssize_t ret;
	size_t size;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info) {
		NL_SET_ERR_MSG(extack,
			       "device does not report tunnel offload info");
		return -EOPNOTSUPP;
	}

	size =	nla_total_size(0); /* _INFO_UDP_PORTS */

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		/* tables are populated from index 0; first empty one ends it */
		if (!info->tables[i].n_entries)
			break;

		ret = ethnl_udp_table_reply_size(info->tables[i].tunnel_types,
						 compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += udp_tunnel_nic_dump_size(req_base->dev, i);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		ret = ethnl_udp_table_reply_size(0, compact);
		if (ret < 0)
			return ret;
		size += ret;

		size += nla_total_size(0) +		 /* _TABLE_ENTRY */
			nla_total_size(sizeof(__be16)) + /* _ENTRY_PORT */
			nla_total_size(sizeof(u32));	 /* _ENTRY_TYPE */
	}

	return size;
}
 80
/* Fill the tunnel offload info into an already-prepared reply message:
 * one _INFO_UDP_PORTS nest containing a _UDP_TABLE nest per populated
 * device table.  Returns -EOPNOTSUPP when the device has no
 * udp_tunnel_nic_info, -EMSGSIZE when the skb runs out of room (every
 * partially written nest is cancelled before returning).
 */
static int
ethnl_tunnel_info_fill_reply(const struct ethnl_req_info *req_base,
			     struct sk_buff *skb)
{
	bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS;
	const struct udp_tunnel_nic_info *info;
	struct nlattr *ports, *table, *entry;
	unsigned int i;

	info = req_base->dev->udp_tunnel_nic_info;
	if (!info)
		return -EOPNOTSUPP;

	ports = nla_nest_start(skb, ETHTOOL_A_TUNNEL_INFO_UDP_PORTS);
	if (!ports)
		return -EMSGSIZE;

	for (i = 0; i < UDP_TUNNEL_NIC_MAX_TABLES; i++) {
		/* tables are populated from index 0; first empty one ends it */
		if (!info->tables[i].n_entries)
			break;

		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE,
				info->tables[i].n_entries))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &info->tables[i].tunnel_types, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		/* the currently offloaded port entries come from the
		 * udp_tunnel_nic core
		 */
		if (udp_tunnel_nic_dump_write(req_base->dev, i, skb))
			goto err_cancel_table;

		nla_nest_end(skb, table);
	}

	if (info->flags & UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN) {
		u32 zero = 0;

		/* report the always-offloaded IANA VXLAN port as an extra
		 * one-entry table; its type bitset is empty because no
		 * further types can be programmed into it
		 */
		table = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE);
		if (!table)
			goto err_cancel_ports;

		if (nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_SIZE, 1))
			goto err_cancel_table;

		if (ethnl_put_bitset32(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_TYPES,
				       &zero, NULL,
				       __ETHTOOL_UDP_TUNNEL_TYPE_CNT,
				       udp_tunnel_type_names, compact))
			goto err_cancel_table;

		entry = nla_nest_start(skb, ETHTOOL_A_TUNNEL_UDP_TABLE_ENTRY);
		if (!entry)
			/* nla_nest_cancel() on a NULL attr is a no-op, so
			 * falling through err_cancel_entry is safe here
			 */
			goto err_cancel_entry;

		if (nla_put_be16(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_PORT,
				 htons(IANA_VXLAN_UDP_PORT)) ||
		    nla_put_u32(skb, ETHTOOL_A_TUNNEL_UDP_ENTRY_TYPE,
				ilog2(UDP_TUNNEL_TYPE_VXLAN)))
			goto err_cancel_entry;

		nla_nest_end(skb, entry);
		nla_nest_end(skb, table);
	}

	nla_nest_end(skb, ports);

	return 0;

err_cancel_entry:
	nla_nest_cancel(skb, entry);
err_cancel_table:
	nla_nest_cancel(skb, table);
err_cancel_ports:
	nla_nest_cancel(skb, ports);
	return -EMSGSIZE;
}
164
/* ETHTOOL_MSG_TUNNEL_INFO_GET doit handler: resolve the target device
 * from the request header, then size and build the reply under the RTNL
 * lock so the udp_tunnel_nic state cannot change between the two steps.
 */
int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct ethnl_req_info req_info = {};
	struct nlattr **tb = info->attrs;
	struct sk_buff *rskb;
	void *reply_payload;
	int reply_len;
	int ret;

	/* takes a reference on req_info.dev; dropped on every exit path */
	ret = ethnl_parse_header_dev_get(&req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 genl_info_net(info), info->extack,
					 true);
	if (ret < 0)
		return ret;

	rtnl_lock();
	ret = ethnl_tunnel_info_reply_size(&req_info, info->extack);
	if (ret < 0)
		goto err_unlock_rtnl;
	reply_len = ret + ethnl_reply_header_size();

	rskb = ethnl_reply_init(reply_len, req_info.dev,
				ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY,
				ETHTOOL_A_TUNNEL_INFO_HEADER,
				info, &reply_payload);
	if (!rskb) {
		ret = -ENOMEM;
		goto err_unlock_rtnl;
	}

	ret = ethnl_tunnel_info_fill_reply(&req_info, rskb);
	if (ret)
		goto err_free_msg;
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	genlmsg_end(rskb, reply_payload);

	return genlmsg_reply(rskb, info);

err_free_msg:
	nlmsg_free(rskb);
err_unlock_rtnl:
	rtnl_unlock();
	ethnl_parse_header_dev_put(&req_info);
	return ret;
}
212
/* Dump iteration state stored in netlink_callback::ctx between dumpit
 * calls: req_info carries the parsed request flags, ifindex the resume
 * position in the per-netns device list.
 */
struct ethnl_tunnel_info_dump_ctx {
	struct ethnl_req_info	req_info;
	unsigned long		ifindex;
};
217
/* Dump start callback: parse and validate the request header.  A dump
 * walks every device in the netns, so a device reference taken while
 * parsing is dropped immediately; only the flags in req_info are kept.
 */
int ethnl_tunnel_info_start(struct netlink_callback *cb)
{
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct nlattr **tb = info->info.attrs;
	int ret;

	/* our state must fit the space netlink reserves for dump context */
	BUILD_BUG_ON(sizeof(*ctx) > sizeof(cb->ctx));

	memset(ctx, 0, sizeof(*ctx));

	ret = ethnl_parse_header_dev_get(&ctx->req_info,
					 tb[ETHTOOL_A_TUNNEL_INFO_HEADER],
					 sock_net(cb->skb->sk), cb->extack,
					 false);
	if (ctx->req_info.dev) {
		/* release the reference right away, dumpit iterates devices */
		ethnl_parse_header_dev_put(&ctx->req_info);
		ctx->req_info.dev = NULL;
	}

	return ret;
}
240
/* Dump callback: emit one TUNNEL_INFO_GET_REPLY per device, resuming
 * from ctx->ifindex.  Devices without tunnel offload info (-EOPNOTSUPP)
 * are skipped silently; -EMSGSIZE with data already queued means the
 * buffer is full and the dump continues in the next call.
 */
int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct ethnl_tunnel_info_dump_ctx *ctx = (void *)cb->ctx;
	struct net *net = sock_net(skb->sk);
	struct net_device *dev;
	int ret = 0;
	void *ehdr;

	rtnl_lock();
	for_each_netdev_dump(net, dev, ctx->ifindex) {
		ehdr = ethnl_dump_put(skb, cb,
				      ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY);
		if (!ehdr) {
			ret = -EMSGSIZE;
			break;
		}

		ret = ethnl_fill_reply_header(skb, dev,
					      ETHTOOL_A_TUNNEL_INFO_HEADER);
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			break;
		}

		/* fill_reply reads the device from req_info; lend it the
		 * current device for this iteration only (no ref taken)
		 */
		ctx->req_info.dev = dev;
		ret = ethnl_tunnel_info_fill_reply(&ctx->req_info, skb);
		ctx->req_info.dev = NULL;
		if (ret < 0) {
			genlmsg_cancel(skb, ehdr);
			if (ret == -EOPNOTSUPP)
				continue;
			break;
		}
		genlmsg_end(skb, ehdr);
	}
	rtnl_unlock();

	if (ret == -EMSGSIZE && skb->len)
		return skb->len;
	return ret;
}