/*
 * This module provides an interface to trigger and test firmware loading.
 *
 * It is designed to be used for basic evaluation of the firmware loading
 * subsystem (for example when validating firmware verification). It lacks
 * any extra dependencies, and will not normally be loaded by the system
 * unless explicitly requested by name.
 */
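
/*
 * Example usage from userspace (a sketch; the sysfs path assumes the
 * "test_firmware" misc device exposes its attribute groups in the default
 * location):
 *
 *   modprobe test_firmware
 *   echo -n "test-firmware.bin" > \
 *	/sys/devices/virtual/misc/test_firmware/trigger_request
 *   cat /dev/test_firmware
 *
 * The requested file must be present in the firmware search path (for
 * example /lib/firmware) for the synchronous request to succeed.
 */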

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/completion.h>
#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#define TEST_FIRMWARE_NAME	"test-firmware.bin"
#define TEST_FIRMWARE_NUM_REQS	4

static DEFINE_MUTEX(test_fw_mutex);
static const struct firmware *test_firmware;

struct test_batched_req {
	u8 idx;
	int rc;
	bool sent;
	const struct firmware *fw;
	const char *name;
	struct completion completion;
	struct task_struct *task;
	struct device *dev;
};

/**
 * test_config - represents configuration for the test for different triggers
 *
 * @name: the name of the firmware file to look for
 * @sync_direct: if true, the sync trigger will use request_firmware_direct()
 *	instead of request_firmware()
 * @send_uevent: whether or not to send a uevent for async requests
 * @num_requests: number of requests to try per test case. This is trigger
 *	specific.
 * @reqs: stores information about all batched requests
 * @read_fw_idx: index of the request whose firmware results we want to read
 *	back through the read_firmware trigger.
 * @test_result: a test may use this to collect the result from the
 *	request_firmware*() calls used in its tests. In order of priority we
 *	always keep any setup error first. If no setup errors were found, we
 *	then move on to the first error encountered while running the API.
 *	Note that for async calls this typically will be a successful result
 *	(0) unless of course you've used bogus parameters, or the system is
 *	out of memory. In the async case the callback is expected to do a bit
 *	more homework to figure out what happened; unfortunately the only
 *	information passed today on error is the fact that no firmware was
 *	found, so we can only assume -ENOENT on async calls if the firmware
 *	is NULL.
 *
 *	Errors you can expect:
 *
 *	API specific:
 *
 *	0:		success for sync, for async it means the request was sent
 *	-EINVAL:	invalid parameters or request
 *	-ENOENT:	file not found
 *
 *	System environment:
 *
 *	-ENOMEM:	memory pressure on the system
 *	-ENODEV:	no devices left to test with
 *	-EINVAL:	an unexpected error has occurred
 * @req_firmware: if @sync_direct is true this is set to
 *	request_firmware_direct(), otherwise request_firmware()
 */
struct test_config {
	char *name;
	bool sync_direct;
	bool send_uevent;
	u8 num_requests;
	u8 read_fw_idx;

	/*
	 * These below don't belong here, but we'll move them once we create
	 * a struct fw_test_device and stuff the misc_dev under there later.
	 */
	struct test_batched_req *reqs;
	int test_result;
	int (*req_firmware)(const struct firmware **fw, const char *name,
			    struct device *device);
};
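
/*
 * A typical batched test run, sketched in terms of the sysfs knobs described
 * above (the directory assumes the default misc device location, and
 * "test-firmware.bin" should exist in the firmware search path for the
 * requests to succeed):
 *
 *   cd /sys/devices/virtual/misc/test_firmware
 *   echo 1 > reset
 *   echo -n "test-firmware.bin" > config_name
 *   echo 4 > config_num_requests
 *   echo 1 > trigger_batched_requests
 *   cat test_result
 *   echo 0 > config_read_fw_idx
 *   cat read_firmware
 *   echo 1 > release_all_firmware
 */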

static struct test_config *test_fw_config;

static ssize_t test_fw_misc_read(struct file *f, char __user *buf,
				 size_t size, loff_t *offset)
{
	ssize_t rc = 0;

	mutex_lock(&test_fw_mutex);
	if (test_firmware)
		rc = simple_read_from_buffer(buf, size, offset,
					     test_firmware->data,
					     test_firmware->size);
	mutex_unlock(&test_fw_mutex);
	return rc;
}

static const struct file_operations test_fw_fops = {
	.owner          = THIS_MODULE,
	.read           = test_fw_misc_read,
};

static void __test_release_all_firmware(void)
{
	struct test_batched_req *req;
	u8 i;

	if (!test_fw_config->reqs)
		return;

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->fw)
			release_firmware(req->fw);
	}

	vfree(test_fw_config->reqs);
	test_fw_config->reqs = NULL;
}

static void test_release_all_firmware(void)
{
	mutex_lock(&test_fw_mutex);
	__test_release_all_firmware();
	mutex_unlock(&test_fw_mutex);
}

static void __test_firmware_config_free(void)
{
	__test_release_all_firmware();
	kfree_const(test_fw_config->name);
	test_fw_config->name = NULL;
}

/*
 * XXX: move to kstrncpy() once merged.
 *
 * Users should use kfree_const() when freeing these.
 */
static int __kstrncpy(char **dst, const char *name, size_t count, gfp_t gfp)
{
	*dst = kstrndup(name, count, gfp);
	if (!*dst)
		return -ENOSPC;
	return count;
}

static int __test_firmware_config_init(void)
{
	int ret;

	ret = __kstrncpy(&test_fw_config->name, TEST_FIRMWARE_NAME,
			 strlen(TEST_FIRMWARE_NAME), GFP_KERNEL);
	if (ret < 0)
		goto out;

	test_fw_config->num_requests = TEST_FIRMWARE_NUM_REQS;
	test_fw_config->send_uevent = true;
	test_fw_config->sync_direct = false;
	test_fw_config->req_firmware = request_firmware;
	test_fw_config->test_result = 0;
	test_fw_config->reqs = NULL;

	return 0;

out:
	__test_firmware_config_free();
	return ret;
}

static ssize_t reset_store(struct device *dev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	int ret;

	mutex_lock(&test_fw_mutex);

	__test_firmware_config_free();

	ret = __test_firmware_config_init();
	if (ret < 0) {
		ret = -ENOMEM;
		pr_err("could not alloc settings for config trigger: %d\n",
		       ret);
		goto out;
	}

	pr_info("reset\n");
	ret = count;

out:
	mutex_unlock(&test_fw_mutex);

	return ret;
}
static DEVICE_ATTR_WO(reset);

static ssize_t config_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	int len = 0;

	mutex_lock(&test_fw_mutex);

	len += snprintf(buf, PAGE_SIZE,
			"Custom trigger configuration for: %s\n",
			dev_name(dev));

	if (test_fw_config->name)
		len += snprintf(buf + len, PAGE_SIZE - len,
				"name:\t%s\n",
				test_fw_config->name);
	else
		len += snprintf(buf + len, PAGE_SIZE - len,
				"name:\tEMPTY\n");

	len += snprintf(buf + len, PAGE_SIZE - len,
			"num_requests:\t%u\n", test_fw_config->num_requests);

	len += snprintf(buf + len, PAGE_SIZE - len,
			"send_uevent:\t\t%s\n",
			test_fw_config->send_uevent ?
			"FW_ACTION_HOTPLUG" :
			"FW_ACTION_NOHOTPLUG");
	len += snprintf(buf + len, PAGE_SIZE - len,
			"sync_direct:\t\t%s\n",
			test_fw_config->sync_direct ? "true" : "false");
	len += snprintf(buf + len, PAGE_SIZE - len,
			"read_fw_idx:\t%u\n", test_fw_config->read_fw_idx);

	mutex_unlock(&test_fw_mutex);

	return len;
}
static DEVICE_ATTR_RO(config);

static ssize_t config_name_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	int ret;

	mutex_lock(&test_fw_mutex);
	kfree_const(test_fw_config->name);
	ret = __kstrncpy(&test_fw_config->name, buf, count, GFP_KERNEL);
	mutex_unlock(&test_fw_mutex);

	return ret;
}

/*
 * As per sysfs_kf_seq_show() the buf is max PAGE_SIZE.
 */
static ssize_t config_test_show_str(char *dst,
				    char *src)
{
	int len;

	mutex_lock(&test_fw_mutex);
	len = snprintf(dst, PAGE_SIZE, "%s\n", src);
	mutex_unlock(&test_fw_mutex);

	return len;
}

static int test_dev_config_update_bool(const char *buf, size_t size,
				       bool *cfg)
{
	int ret;

	mutex_lock(&test_fw_mutex);
	if (strtobool(buf, cfg) < 0)
		ret = -EINVAL;
	else
		ret = size;
	mutex_unlock(&test_fw_mutex);

	return ret;
}

static ssize_t
test_dev_config_show_bool(char *buf,
			  bool config)
{
	bool val;

	mutex_lock(&test_fw_mutex);
	val = config;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static ssize_t test_dev_config_show_int(char *buf, int cfg)
{
	int val;

	mutex_lock(&test_fw_mutex);
	val = cfg;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg)
{
	int ret;
	long new;

	ret = kstrtol(buf, 10, &new);
	if (ret)
		return ret;

	if (new < 0 || new > U8_MAX)
		return -EINVAL;

	mutex_lock(&test_fw_mutex);
	*(u8 *)cfg = new;
	mutex_unlock(&test_fw_mutex);

	/* Always return full write size even if we didn't consume all */
	return size;
}

static ssize_t test_dev_config_show_u8(char *buf, u8 cfg)
{
	u8 val;

	mutex_lock(&test_fw_mutex);
	val = cfg;
	mutex_unlock(&test_fw_mutex);

	return snprintf(buf, PAGE_SIZE, "%u\n", val);
}

static ssize_t config_name_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return config_test_show_str(buf, test_fw_config->name);
}
static DEVICE_ATTR_RW(config_name);

static ssize_t config_num_requests_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	int rc;

	mutex_lock(&test_fw_mutex);
	if (test_fw_config->reqs) {
		pr_err("Must call release_all_firmware prior to changing config\n");
		rc = -EINVAL;
		mutex_unlock(&test_fw_mutex);
		goto out;
	}
	mutex_unlock(&test_fw_mutex);

	rc = test_dev_config_update_u8(buf, count,
				       &test_fw_config->num_requests);

out:
	return rc;
}

static ssize_t config_num_requests_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return test_dev_config_show_u8(buf, test_fw_config->num_requests);
}
static DEVICE_ATTR_RW(config_num_requests);

static ssize_t config_sync_direct_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	int rc = test_dev_config_update_bool(buf, count,
					     &test_fw_config->sync_direct);

	if (rc == count)
		test_fw_config->req_firmware = test_fw_config->sync_direct ?
				       request_firmware_direct :
				       request_firmware;
	return rc;
}

static ssize_t config_sync_direct_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->sync_direct);
}
static DEVICE_ATTR_RW(config_sync_direct);

static ssize_t config_send_uevent_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return test_dev_config_update_bool(buf, count,
					   &test_fw_config->send_uevent);
}

static ssize_t config_send_uevent_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_bool(buf, test_fw_config->send_uevent);
}
static DEVICE_ATTR_RW(config_send_uevent);

static ssize_t config_read_fw_idx_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	return test_dev_config_update_u8(buf, count,
					 &test_fw_config->read_fw_idx);
}

static ssize_t config_read_fw_idx_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	return test_dev_config_show_u8(buf, test_fw_config->read_fw_idx);
}
static DEVICE_ATTR_RW(config_read_fw_idx);

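/*
 * The three trigger_* attributes below take a firmware name directly and do
 * not use test_fw_config; the loaded blob can be read back through the
 * /dev/test_firmware misc device.
 */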
static ssize_t trigger_request_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s'\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware(&test_firmware, name, dev);
	if (rc) {
		pr_info("load of '%s' failed: %d\n", name, rc);
		goto out;
	}
	pr_info("loaded: %zu\n", test_firmware->size);
	rc = count;

out:
	mutex_unlock(&test_fw_mutex);

	kfree(name);

	return rc;
}
static DEVICE_ATTR_WO(trigger_request);

static DECLARE_COMPLETION(async_fw_done);

static void trigger_async_request_cb(const struct firmware *fw, void *context)
{
	test_firmware = fw;
	complete(&async_fw_done);
}

static ssize_t trigger_async_request_store(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s'\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, name, dev,
				     GFP_KERNEL, NULL,
				     trigger_async_request_cb);
	if (rc) {
		pr_info("async load of '%s' failed: %d\n", name, rc);
		kfree(name);
		goto out;
	}
	/* Free 'name' ASAP, to test for race conditions */
	kfree(name);

	wait_for_completion(&async_fw_done);

	if (test_firmware) {
		pr_info("loaded: %zu\n", test_firmware->size);
		rc = count;
	} else {
		pr_err("failed to async load firmware\n");
		rc = -ENODEV;
	}

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_async_request);

static ssize_t trigger_custom_fallback_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	int rc;
	char *name;

	name = kstrndup(buf, count, GFP_KERNEL);
	if (!name)
		return -ENOSPC;

	pr_info("loading '%s' using custom fallback mechanism\n", name);

	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	test_firmware = NULL;
	rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOHOTPLUG, name,
				     dev, GFP_KERNEL, NULL,
				     trigger_async_request_cb);
	if (rc) {
		pr_info("async load of '%s' failed: %d\n", name, rc);
		kfree(name);
		goto out;
	}
	/* Free 'name' ASAP, to test for race conditions */
	kfree(name);

	wait_for_completion(&async_fw_done);

	if (test_firmware) {
		pr_info("loaded: %zu\n", test_firmware->size);
		rc = count;
	} else {
		pr_err("failed to async load firmware\n");
		rc = -ENODEV;
	}

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_custom_fallback);

static int test_fw_run_batch_request(void *data)
{
	struct test_batched_req *req = data;

	if (!req) {
		test_fw_config->test_result = -EINVAL;
		return -EINVAL;
	}

	req->rc = test_fw_config->req_firmware(&req->fw, req->name, req->dev);
	if (req->rc) {
		pr_info("#%u: batched sync load failed: %d\n",
			req->idx, req->rc);
		if (!test_fw_config->test_result)
			test_fw_config->test_result = req->rc;
	} else if (req->fw) {
		req->sent = true;
		pr_info("#%u: batched sync loaded %zu\n",
			req->idx, req->fw->size);
	}
	complete(&req->completion);

	req->task = NULL;

	return 0;
}

/*
 * We use a kthread as otherwise the kernel serializes all our sync requests
 * and we would not be able to mimic batched requests on a sync call. Batched
 * requests on a sync call can for instance happen on a device driver when
 * multiple cards are used and firmware loading happens outside of probe.
 */
static ssize_t trigger_batched_requests_store(struct device *dev,
					      struct device_attribute *attr,
					      const char *buf, size_t count)
{
	struct test_batched_req *req;
	int rc;
	u8 i;

	mutex_lock(&test_fw_mutex);

	test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
				       test_fw_config->num_requests * 2);
	if (!test_fw_config->reqs) {
		rc = -ENOMEM;
		goto out_unlock;
	}

	pr_info("batched sync firmware loading '%s' %u times\n",
		test_fw_config->name, test_fw_config->num_requests);

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (!req) {
			WARN_ON(1);
			rc = -ENOMEM;
			goto out_bail;
		}
		req->fw = NULL;
		req->idx = i;
		req->name = test_fw_config->name;
		req->dev = dev;
		init_completion(&req->completion);
		req->task = kthread_run(test_fw_run_batch_request, req,
					"%s-%u", KBUILD_MODNAME, req->idx);
		if (!req->task || IS_ERR(req->task)) {
			pr_err("Setting up thread %u failed\n", req->idx);
			req->task = NULL;
			rc = -ENOMEM;
			goto out_bail;
		}
	}

	rc = count;

	/*
	 * We require an explicit release to give us more time and leeway
	 * before calling release_firmware(), which improves our chances of
	 * forcing a batched request. If we instead called release_firmware()
	 * right away, a successful firmware request might miss its chance to
	 * become a batched request.
	 */

out_bail:
	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->task || req->sent)
			wait_for_completion(&req->completion);
	}

	/* Override any worker error if we had a general setup error */
	if (rc < 0)
		test_fw_config->test_result = rc;

out_unlock:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests);

/*
 * We wait for each callback to return with the lock held, no need to lock here
 */
static void trigger_batched_cb(const struct firmware *fw, void *context)
{
	struct test_batched_req *req = context;

	if (!req) {
		test_fw_config->test_result = -EINVAL;
		return;
	}

	/* forces *some* batched requests to queue up */
	if (!req->idx)
		ssleep(2);

	req->fw = fw;

	/*
	 * Unfortunately the firmware API gives us nothing other than a null FW
	 * if the firmware was not found on async requests.  Best we can do is
	 * just assume -ENOENT. A better API would pass the actual return
	 * value to the callback.
	 */
	if (!fw && !test_fw_config->test_result)
		test_fw_config->test_result = -ENOENT;

	complete(&req->completion);
}

static
ssize_t trigger_batched_requests_async_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct test_batched_req *req;
	bool send_uevent;
	int rc;
	u8 i;

	mutex_lock(&test_fw_mutex);

	test_fw_config->reqs = vzalloc(sizeof(struct test_batched_req) *
				       test_fw_config->num_requests * 2);
	if (!test_fw_config->reqs) {
		rc = -ENOMEM;
		goto out;
	}

	pr_info("batched async firmware loading '%s' %u times\n",
		test_fw_config->name, test_fw_config->num_requests);

	send_uevent = test_fw_config->send_uevent ? FW_ACTION_HOTPLUG :
		FW_ACTION_NOHOTPLUG;

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (!req) {
			WARN_ON(1);
			rc = -ENOMEM;
			goto out_bail;
		}
		req->name = test_fw_config->name;
		req->fw = NULL;
		req->idx = i;
		init_completion(&req->completion);
		rc = request_firmware_nowait(THIS_MODULE, send_uevent,
					     req->name,
					     dev, GFP_KERNEL, req,
					     trigger_batched_cb);
		if (rc) {
			pr_info("#%u: batched async load failed setup: %d\n",
				i, rc);
			req->rc = rc;
			goto out_bail;
		} else
			req->sent = true;
	}

	rc = count;

out_bail:

	/*
	 * We require an explicit release to give us more time and leeway
	 * before calling release_firmware(), which improves our chances of
	 * forcing a batched request. If we instead called release_firmware()
	 * right away, a successful firmware request might miss its chance to
	 * become a batched request.
	 */

	for (i = 0; i < test_fw_config->num_requests; i++) {
		req = &test_fw_config->reqs[i];
		if (req->sent)
			wait_for_completion(&req->completion);
	}

	/* Override any worker error if we had a general setup error */
	if (rc < 0)
		test_fw_config->test_result = rc;

out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_WO(trigger_batched_requests_async);

static ssize_t test_result_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	return test_dev_config_show_int(buf, test_fw_config->test_result);
}
static DEVICE_ATTR_RO(test_result);

static ssize_t release_all_firmware_store(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t count)
{
	test_release_all_firmware();
	return count;
}
static DEVICE_ATTR_WO(release_all_firmware);

static ssize_t read_firmware_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct test_batched_req *req;
	u8 idx;
	ssize_t rc = 0;

	mutex_lock(&test_fw_mutex);

	idx = test_fw_config->read_fw_idx;
	if (idx >= test_fw_config->num_requests) {
		rc = -ERANGE;
		goto out;
	}

	if (!test_fw_config->reqs) {
		rc = -EINVAL;
		goto out;
	}

	req = &test_fw_config->reqs[idx];
	if (!req->fw) {
		pr_err("#%u: failed to async load firmware\n", idx);
		rc = -ENOENT;
		goto out;
	}

	pr_info("#%u: loaded %zu\n", idx, req->fw->size);

	if (req->fw->size > PAGE_SIZE) {
		pr_err("Testing interface must use PAGE_SIZE firmware for now\n");
		rc = -EINVAL;
		goto out;
	}
	memcpy(buf, req->fw->data, req->fw->size);

	rc = req->fw->size;
out:
	mutex_unlock(&test_fw_mutex);

	return rc;
}
static DEVICE_ATTR_RO(read_firmware);

#define TEST_FW_DEV_ATTR(name)          &dev_attr_##name.attr

static struct attribute *test_dev_attrs[] = {
	TEST_FW_DEV_ATTR(reset),

	TEST_FW_DEV_ATTR(config),
	TEST_FW_DEV_ATTR(config_name),
	TEST_FW_DEV_ATTR(config_num_requests),
	TEST_FW_DEV_ATTR(config_sync_direct),
	TEST_FW_DEV_ATTR(config_send_uevent),
	TEST_FW_DEV_ATTR(config_read_fw_idx),

	/* These don't use the config at all - they could be ported! */
	TEST_FW_DEV_ATTR(trigger_request),
	TEST_FW_DEV_ATTR(trigger_async_request),
	TEST_FW_DEV_ATTR(trigger_custom_fallback),

	/* These use the config and can use the test_result */
	TEST_FW_DEV_ATTR(trigger_batched_requests),
	TEST_FW_DEV_ATTR(trigger_batched_requests_async),

	TEST_FW_DEV_ATTR(release_all_firmware),
	TEST_FW_DEV_ATTR(test_result),
	TEST_FW_DEV_ATTR(read_firmware),
	NULL,
};

ATTRIBUTE_GROUPS(test_dev);

static struct miscdevice test_fw_misc_device = {
	.minor          = MISC_DYNAMIC_MINOR,
	.name           = "test_firmware",
	.fops           = &test_fw_fops,
	.groups		= test_dev_groups,
};

static int __init test_firmware_init(void)
{
	int rc;

	test_fw_config = kzalloc(sizeof(struct test_config), GFP_KERNEL);
	if (!test_fw_config)
		return -ENOMEM;

	rc = __test_firmware_config_init();
	if (rc) {
		kfree(test_fw_config);
		return rc;
	}

	rc = misc_register(&test_fw_misc_device);
	if (rc) {
		__test_firmware_config_free();
		kfree(test_fw_config);
		pr_err("could not register misc device: %d\n", rc);
		return rc;
	}

	pr_warn("interface ready\n");

	return 0;
}

module_init(test_firmware_init);

static void __exit test_firmware_exit(void)
{
	mutex_lock(&test_fw_mutex);
	release_firmware(test_firmware);
	misc_deregister(&test_fw_misc_device);
	__test_firmware_config_free();
	kfree(test_fw_config);
	mutex_unlock(&test_fw_mutex);

	pr_warn("removed interface\n");
}

module_exit(test_firmware_exit);

MODULE_AUTHOR("Kees Cook <keescook@chromium.org>");
MODULE_LICENSE("GPL");