Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.1.
  1// SPDX-License-Identifier: GPL-2.0-only
  2
  3/*
  4 *  HID-BPF support for Linux
  5 *
  6 *  Copyright (c) 2022 Benjamin Tissoires
  7 */
  8
  9#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 10#include <linux/bitops.h>
 11#include <linux/btf.h>
 12#include <linux/btf_ids.h>
 13#include <linux/filter.h>
 14#include <linux/hid.h>
 15#include <linux/hid_bpf.h>
 16#include <linux/init.h>
 17#include <linux/kfifo.h>
 18#include <linux/minmax.h>
 19#include <linux/module.h>
 20#include <linux/workqueue.h>
 21#include "hid_bpf_dispatch.h"
 22#include "entrypoints/entrypoints.lskel.h"
 23
/*
 * Ops vector filled in by hid-core when it loads; NULL until then, so every
 * entry point below checks it before use. Exported (non-GPL) so the HID bus
 * module can install/clear it.
 */
struct hid_bpf_ops *hid_bpf_ops;
EXPORT_SYMBOL(hid_bpf_ops);
 26
 27/**
 28 * hid_bpf_device_event - Called whenever an event is coming in from the device
 29 *
 30 * @ctx: The HID-BPF context
 31 *
 32 * @return %0 on success and keep processing; a positive value to change the
 33 * incoming size buffer; a negative error code to interrupt the processing
 34 * of this event
 35 *
 36 * Declare an %fmod_ret tracing bpf program to this function and attach this
 37 * program through hid_bpf_attach_prog() to have this helper called for
 38 * any incoming event from the device itself.
 39 *
 40 * The function is called while on IRQ context, so we can not sleep.
 41 */
 42/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_device_event(struct hid_bpf_ctx *ctx)
{
	/*
	 * Intentional no-op: this symbol only exists as an fmod_ret attach
	 * point; attached BPF programs supply the real return value.
	 */
	return 0;
}
 47
 48u8 *
 49dispatch_hid_bpf_device_event(struct hid_device *hdev, enum hid_report_type type, u8 *data,
 50			      u32 *size, int interrupt)
 51{
 52	struct hid_bpf_ctx_kern ctx_kern = {
 53		.ctx = {
 54			.hid = hdev,
 55			.report_type = type,
 56			.allocated_size = hdev->bpf.allocated_data,
 57			.size = *size,
 58		},
 59		.data = hdev->bpf.device_data,
 60	};
 61	int ret;
 62
 63	if (type >= HID_REPORT_TYPES)
 64		return ERR_PTR(-EINVAL);
 65
 66	/* no program has been attached yet */
 67	if (!hdev->bpf.device_data)
 68		return data;
 69
 70	memset(ctx_kern.data, 0, hdev->bpf.allocated_data);
 71	memcpy(ctx_kern.data, data, *size);
 72
 73	ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_DEVICE_EVENT, &ctx_kern);
 74	if (ret < 0)
 75		return ERR_PTR(ret);
 76
 77	if (ret) {
 78		if (ret > ctx_kern.ctx.allocated_size)
 79			return ERR_PTR(-EINVAL);
 80
 81		*size = ret;
 82	}
 83
 84	return ctx_kern.data;
 85}
 86EXPORT_SYMBOL_GPL(dispatch_hid_bpf_device_event);
 87
 88/**
 89 * hid_bpf_rdesc_fixup - Called when the probe function parses the report
 90 * descriptor of the HID device
 91 *
 92 * @ctx: The HID-BPF context
 93 *
 94 * @return 0 on success and keep processing; a positive value to change the
 95 * incoming size buffer; a negative error code to interrupt the processing
 96 * of this event
 97 *
 98 * Declare an %fmod_ret tracing bpf program to this function and attach this
 99 * program through hid_bpf_attach_prog() to have this helper called before any
100 * parsing of the report descriptor by HID.
101 */
102/* never used by the kernel but declared so we can load and attach a tracepoint */
__weak noinline int hid_bpf_rdesc_fixup(struct hid_bpf_ctx *ctx)
{
	/*
	 * Intentional no-op: fmod_ret attach point only; attached BPF
	 * programs provide the real return value.
	 */
	return 0;
}
107
108u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size)
109{
110	int ret;
111	struct hid_bpf_ctx_kern ctx_kern = {
112		.ctx = {
113			.hid = hdev,
114			.size = *size,
115			.allocated_size = HID_MAX_DESCRIPTOR_SIZE,
116		},
117	};
118
119	ctx_kern.data = kzalloc(ctx_kern.ctx.allocated_size, GFP_KERNEL);
120	if (!ctx_kern.data)
121		goto ignore_bpf;
122
123	memcpy(ctx_kern.data, rdesc, min_t(unsigned int, *size, HID_MAX_DESCRIPTOR_SIZE));
124
125	ret = hid_bpf_prog_run(hdev, HID_BPF_PROG_TYPE_RDESC_FIXUP, &ctx_kern);
126	if (ret < 0)
127		goto ignore_bpf;
128
129	if (ret) {
130		if (ret > ctx_kern.ctx.allocated_size)
131			goto ignore_bpf;
132
133		*size = ret;
134	}
135
136	rdesc = krealloc(ctx_kern.data, *size, GFP_KERNEL);
137
138	return rdesc;
139
140 ignore_bpf:
141	kfree(ctx_kern.data);
142	return kmemdup(rdesc, *size, GFP_KERNEL);
143}
144EXPORT_SYMBOL_GPL(call_hid_bpf_rdesc_fixup);
145
146/* Disables missing prototype warnings */
147__bpf_kfunc_start_defs();
148
149/**
150 * hid_bpf_get_data - Get the kernel memory pointer associated with the context @ctx
151 *
152 * @ctx: The HID-BPF context
153 * @offset: The offset within the memory
154 * @rdwr_buf_size: the const size of the buffer
155 *
156 * @returns %NULL on error, an %__u8 memory pointer on success
157 */
158__bpf_kfunc __u8 *
159hid_bpf_get_data(struct hid_bpf_ctx *ctx, unsigned int offset, const size_t rdwr_buf_size)
160{
161	struct hid_bpf_ctx_kern *ctx_kern;
162
163	if (!ctx)
164		return NULL;
165
166	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
167
168	if (rdwr_buf_size + offset > ctx->allocated_size)
169		return NULL;
170
171	return ctx_kern->data + offset;
172}
173__bpf_kfunc_end_defs();
174
175/*
176 * The following set contains all functions we agree BPF programs
177 * can use.
178 */
/* kfuncs callable from tracing (fmod_ret) HID-BPF programs */
BTF_SET8_START(hid_bpf_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_get_data, KF_RET_NULL)
BTF_SET8_END(hid_bpf_kfunc_ids)

/* registered for BPF_PROG_TYPE_TRACING in hid_bpf_init() */
static const struct btf_kfunc_id_set hid_bpf_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_kfunc_ids,
};
187
188static int device_match_id(struct device *dev, const void *id)
189{
190	struct hid_device *hdev = to_hid_device(dev);
191
192	return hdev->id == *(int *)id;
193}
194
195static int __hid_bpf_allocate_data(struct hid_device *hdev, u8 **data, u32 *size)
196{
197	u8 *alloc_data;
198	unsigned int i, j, max_report_len = 0;
199	size_t alloc_size = 0;
200
201	/* compute the maximum report length for this device */
202	for (i = 0; i < HID_REPORT_TYPES; i++) {
203		struct hid_report_enum *report_enum = hdev->report_enum + i;
204
205		for (j = 0; j < HID_MAX_IDS; j++) {
206			struct hid_report *report = report_enum->report_id_hash[j];
207
208			if (report)
209				max_report_len = max(max_report_len, hid_report_len(report));
210		}
211	}
212
213	/*
214	 * Give us a little bit of extra space and some predictability in the
215	 * buffer length we create. This way, we can tell users that they can
216	 * work on chunks of 64 bytes of memory without having the bpf verifier
217	 * scream at them.
218	 */
219	alloc_size = DIV_ROUND_UP(max_report_len, 64) * 64;
220
221	alloc_data = kzalloc(alloc_size, GFP_KERNEL);
222	if (!alloc_data)
223		return -ENOMEM;
224
225	*data = alloc_data;
226	*size = alloc_size;
227
228	return 0;
229}
230
/*
 * Ensure the per-device BPF scratch buffer exists; idempotent.
 * Returns 0 on success (including when already allocated), -ENOMEM otherwise.
 */
static int hid_bpf_allocate_event_data(struct hid_device *hdev)
{
	/* hdev->bpf.device_data is already allocated, abort */
	if (hdev->bpf.device_data)
		return 0;

	return __hid_bpf_allocate_data(hdev, &hdev->bpf.device_data, &hdev->bpf.allocated_data);
}
239
240int hid_bpf_reconnect(struct hid_device *hdev)
241{
242	if (!test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
243		return device_reprobe(&hdev->dev);
244
245	return 0;
246}
247
/*
 * Attach @prog (already refcounted by the caller) to @hdev.
 *
 * Resolves and validates the HID-BPF program type, lazily allocates the
 * per-device scratch buffer for device-event programs, then performs the
 * attach. For rdesc-fixup programs the device is reprobed so the new
 * descriptor takes effect; if that fails the just-created fd is closed so
 * no half-attached state leaks to userspace.
 *
 * Returns the fd representing the attachment (>= 0) or a negative errno.
 */
static int do_hid_bpf_attach_prog(struct hid_device *hdev, int prog_fd, struct bpf_prog *prog,
				  __u32 flags)
{
	int fd, err, prog_type;

	prog_type = hid_bpf_get_prog_attach_type(prog);
	if (prog_type < 0)
		return prog_type;

	if (prog_type >= HID_BPF_PROG_TYPE_MAX)
		return -EINVAL;

	if (prog_type == HID_BPF_PROG_TYPE_DEVICE_EVENT) {
		err = hid_bpf_allocate_event_data(hdev);
		if (err)
			return err;
	}

	fd = __hid_bpf_attach_prog(hdev, prog_type, prog_fd, prog, flags);
	if (fd < 0)
		return fd;

	if (prog_type == HID_BPF_PROG_TYPE_RDESC_FIXUP) {
		err = hid_bpf_reconnect(hdev);
		if (err) {
			/* undo the attach: closing the fd detaches the program */
			close_fd(fd);
			return err;
		}
	}

	return fd;
}
280
281/* Disables missing prototype warnings */
282__bpf_kfunc_start_defs();
283
284/**
285 * hid_bpf_attach_prog - Attach the given @prog_fd to the given HID device
286 *
287 * @hid_id: the system unique identifier of the HID device
288 * @prog_fd: an fd in the user process representing the program to attach
289 * @flags: any logical OR combination of &enum hid_bpf_attach_flags
290 *
291 * @returns an fd of a bpf_link object on success (> %0), an error code otherwise.
292 * Closing this fd will detach the program from the HID device (unless the bpf_link
293 * is pinned to the BPF file system).
294 */
295/* called from syscall */
__bpf_kfunc int
hid_bpf_attach_prog(unsigned int hid_id, int prog_fd, __u32 flags)
{
	struct hid_device *hdev;
	struct bpf_prog *prog;
	struct device *dev;
	int err, fd;

	/* HID core has not registered its ops yet: nothing to attach to */
	if (!hid_bpf_ops)
		return -EINVAL;

	if ((flags & ~HID_BPF_FLAG_MASK))
		return -EINVAL;

	/* bus_find_device() takes a reference on the device */
	dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
	if (!dev)
		return -EINVAL;

	hdev = to_hid_device(dev);

	/*
	 * take a ref on the prog itself, it will be released
	 * on errors or when it'll be detached
	 */
	prog = bpf_prog_get(prog_fd);
	if (IS_ERR(prog)) {
		err = PTR_ERR(prog);
		goto out_dev_put;
	}

	fd = do_hid_bpf_attach_prog(hdev, prog_fd, prog, flags);
	if (fd < 0) {
		err = fd;
		goto out_prog_put;
	}

	/*
	 * NOTE(review): on success both the prog and device references are
	 * kept — presumably held for the lifetime of the attachment and
	 * dropped on detach; confirm against __hid_bpf_attach_prog().
	 */
	return fd;

 out_prog_put:
	bpf_prog_put(prog);
 out_dev_put:
	put_device(dev);
	return err;
}
340
341/**
342 * hid_bpf_allocate_context - Allocate a context to the given HID device
343 *
344 * @hid_id: the system unique identifier of the HID device
345 *
346 * @returns A pointer to &struct hid_bpf_ctx on success, %NULL on error.
347 */
__bpf_kfunc struct hid_bpf_ctx *
hid_bpf_allocate_context(unsigned int hid_id)
{
	struct hid_device *hdev;
	struct hid_bpf_ctx_kern *ctx_kern = NULL;
	struct device *dev;

	/* HID core has not registered its ops yet */
	if (!hid_bpf_ops)
		return NULL;

	/* takes a device reference, dropped in hid_bpf_release_context() */
	dev = bus_find_device(hid_bpf_ops->bus_type, NULL, &hid_id, device_match_id);
	if (!dev)
		return NULL;

	hdev = to_hid_device(dev);

	ctx_kern = kzalloc(sizeof(*ctx_kern), GFP_KERNEL);
	if (!ctx_kern) {
		put_device(dev);
		return NULL;
	}

	ctx_kern->ctx.hid = hdev;

	/* caller owns the context; must pair with hid_bpf_release_context() */
	return &ctx_kern->ctx;
}
374
375/**
376 * hid_bpf_release_context - Release the previously allocated context @ctx
377 *
378 * @ctx: the HID-BPF context to release
379 *
380 */
__bpf_kfunc void
hid_bpf_release_context(struct hid_bpf_ctx *ctx)
{
	struct hid_bpf_ctx_kern *ctx_kern;
	struct hid_device *hid;

	/* recover the kernel wrapper allocated in hid_bpf_allocate_context() */
	ctx_kern = container_of(ctx, struct hid_bpf_ctx_kern, ctx);
	hid = (struct hid_device *)ctx_kern->ctx.hid; /* ignore const */

	kfree(ctx_kern);

	/* get_device() is called by bus_find_device() */
	put_device(&hid->dev);
}
395
396/**
397 * hid_bpf_hw_request - Communicate with a HID device
398 *
399 * @ctx: the HID-BPF context previously allocated in hid_bpf_allocate_context()
400 * @buf: a %PTR_TO_MEM buffer
401 * @buf__sz: the size of the data to transfer
402 * @rtype: the type of the report (%HID_INPUT_REPORT, %HID_FEATURE_REPORT, %HID_OUTPUT_REPORT)
403 * @reqtype: the type of the request (%HID_REQ_GET_REPORT, %HID_REQ_SET_REPORT, ...)
404 *
405 * @returns %0 on success, a negative error code otherwise.
406 */
__bpf_kfunc int
hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
		   enum hid_report_type rtype, enum hid_class_request reqtype)
{
	struct hid_device *hdev;
	struct hid_report *report;
	struct hid_report_enum *report_enum;
	u8 *dma_data;
	u32 report_len;
	int ret;

	/* check arguments */
	if (!ctx || !hid_bpf_ops || !buf)
		return -EINVAL;

	/* only the three standard report types are valid */
	switch (rtype) {
	case HID_INPUT_REPORT:
	case HID_OUTPUT_REPORT:
	case HID_FEATURE_REPORT:
		break;
	default:
		return -EINVAL;
	}

	/* restrict to the known HID class requests */
	switch (reqtype) {
	case HID_REQ_GET_REPORT:
	case HID_REQ_GET_IDLE:
	case HID_REQ_GET_PROTOCOL:
	case HID_REQ_SET_REPORT:
	case HID_REQ_SET_IDLE:
	case HID_REQ_SET_PROTOCOL:
		break;
	default:
		return -EINVAL;
	}

	/* need at least one byte: buf[0] is used below as the report number */
	if (buf__sz < 1)
		return -EINVAL;

	hdev = (struct hid_device *)ctx->hid; /* discard const */

	/* look up the report matching buf (presumably by its leading
	 * report ID — semantics live in hid_bpf_ops->hid_get_report) */
	report_enum = hdev->report_enum + rtype;
	report = hid_bpf_ops->hid_get_report(report_enum, buf);
	if (!report)
		return -EINVAL;

	report_len = hid_report_len(report);

	/* never transfer more than the report's declared length */
	if (buf__sz > report_len)
		buf__sz = report_len;

	/* bounce buffer: GFP_KERNEL copy of buf handed to the transport */
	dma_data = kmemdup(buf, buf__sz, GFP_KERNEL);
	if (!dma_data)
		return -ENOMEM;

	ret = hid_bpf_ops->hid_hw_raw_request(hdev,
					      dma_data[0],
					      dma_data,
					      buf__sz,
					      rtype,
					      reqtype);

	/* a positive return is the number of bytes received; copy them back */
	if (ret > 0)
		memcpy(buf, dma_data, ret);

	kfree(dma_data);
	return ret;
}
475__bpf_kfunc_end_defs();
476
477/* our HID-BPF entrypoints */
/* our HID-BPF entrypoints */
BTF_SET8_START(hid_bpf_fmodret_ids)
BTF_ID_FLAGS(func, hid_bpf_device_event)
BTF_ID_FLAGS(func, hid_bpf_rdesc_fixup)
BTF_ID_FLAGS(func, __hid_bpf_tail_call)
BTF_SET8_END(hid_bpf_fmodret_ids)

/* fmod_ret attach points, registered in hid_bpf_init() */
static const struct btf_kfunc_id_set hid_bpf_fmodret_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_fmodret_ids,
};

/* for syscall HID-BPF */
BTF_SET8_START(hid_bpf_syscall_kfunc_ids)
BTF_ID_FLAGS(func, hid_bpf_attach_prog)
BTF_ID_FLAGS(func, hid_bpf_allocate_context, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, hid_bpf_release_context, KF_RELEASE)
BTF_ID_FLAGS(func, hid_bpf_hw_request)
BTF_SET8_END(hid_bpf_syscall_kfunc_ids)

/* kfuncs for BPF_PROG_TYPE_SYSCALL programs, registered in hid_bpf_init() */
static const struct btf_kfunc_id_set hid_bpf_syscall_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &hid_bpf_syscall_kfunc_ids,
};
501
502int hid_bpf_connect_device(struct hid_device *hdev)
503{
504	struct hid_bpf_prog_list *prog_list;
505
506	rcu_read_lock();
507	prog_list = rcu_dereference(hdev->bpf.progs[HID_BPF_PROG_TYPE_DEVICE_EVENT]);
508	rcu_read_unlock();
509
510	/* only allocate BPF data if there are programs attached */
511	if (!prog_list)
512		return 0;
513
514	return hid_bpf_allocate_event_data(hdev);
515}
516EXPORT_SYMBOL_GPL(hid_bpf_connect_device);
517
518void hid_bpf_disconnect_device(struct hid_device *hdev)
519{
520	kfree(hdev->bpf.device_data);
521	hdev->bpf.device_data = NULL;
522	hdev->bpf.allocated_data = 0;
523}
524EXPORT_SYMBOL_GPL(hid_bpf_disconnect_device);
525
526void hid_bpf_destroy_device(struct hid_device *hdev)
527{
528	if (!hdev)
529		return;
530
531	/* mark the device as destroyed in bpf so we don't reattach it */
532	hdev->bpf.destroyed = true;
533
534	__hid_bpf_destroy_device(hdev);
535}
536EXPORT_SYMBOL_GPL(hid_bpf_destroy_device);
537
/* Called by hid-core at device creation: set up the HID-BPF state. */
void hid_bpf_device_init(struct hid_device *hdev)
{
	spin_lock_init(&hdev->bpf.progs_lock);
}
EXPORT_SYMBOL_GPL(hid_bpf_device_init);
543
static int __init hid_bpf_init(void)
{
	int err;

	/* Note: if we exit with an error any time here, we would entirely break HID, which
	 * is probably not something we want. So we log an error and return success.
	 *
	 * This is not a big deal: the syscall allowing to attach a BPF program to a HID device
	 * will not be available, so nobody will be able to use the functionality.
	 */

	/* expose the fmod_ret entrypoints (device_event, rdesc_fixup, tail_call) */
	err = register_btf_fmodret_id_set(&hid_bpf_fmodret_set);
	if (err) {
		pr_warn("error while registering fmodret entrypoints: %d", err);
		return 0;
	}

	/* load the pre-built dispatcher skeleton shipped with the kernel */
	err = hid_bpf_preload_skel();
	if (err) {
		pr_warn("error while preloading HID BPF dispatcher: %d", err);
		return 0;
	}

	/* register tracing kfuncs after we are sure we can load our preloaded bpf program */
	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &hid_bpf_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF tracing kfuncs: %d", err);
		return 0;
	}

	/* register syscalls after we are sure we can load our preloaded bpf program */
	err = register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &hid_bpf_syscall_kfunc_set);
	if (err) {
		pr_warn("error while setting HID BPF syscall kfuncs: %d", err);
		return 0;
	}

	return 0;
}
583
static void __exit hid_bpf_exit(void)
{
	/* HID depends on us, so if we hit that code, we are guaranteed that hid
	 * has been removed and thus we do not need to clear the HID devices
	 */
	hid_bpf_free_links_and_skel();
}
591
592late_initcall(hid_bpf_init);
593module_exit(hid_bpf_exit);
594MODULE_AUTHOR("Benjamin Tissoires");
595MODULE_LICENSE("GPL");