/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt , a...)
#endif

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

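/*
 * Worked example (illustrative, assuming PAGE_SIZE == 4096): with
 * CONFIG_DEBUG_SET_MODULE_RONX=y, debug_align(5000) rounds up to 8192,
 * so each protected region starts on its own page; otherwise the value
 * passes through unchanged.
 */
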
/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))

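/*
 * Worked example (illustrative, 4 KiB pages): BASE = 0x1000 and
 * SIZE = 0x2001 touch pfns 1 through 3, so the macro yields
 * PFN_DOWN(0x3000) - PFN_DOWN(0x1000) + 1 = 3 pages; SIZE = 0 yields 0.
 */
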
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete uses stop_machine/add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);

#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

/* Block module loading/unloading? */
int modules_disabled = 0;

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Bounds of module allocation, for speeding __module_address.
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

int register_module_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block * nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long *strmap;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};

/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}
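/*
 * Calling convention sketch (illustrative): 0 means "reference taken",
 * -EBUSY means "still initializing, worth retrying" (resolve_symbol_wait
 * below waits on exactly this case), and -ENOENT means the module is
 * unavailable for good.
 */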
static inline void add_taint_module(struct module *mod, unsigned flag)
{
	add_taint(flag);
	mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
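/*
 * Typical use (a sketch; "__param" is the section this file scans for
 * module parameters elsewhere in the loader):
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 *
 * A missing section degrades gracefully: section 0 gives a NULL base
 * and a zero count.
 */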
 213
 214/* Provided by the linker */
 215extern const struct kernel_symbol __start___ksymtab[];
 216extern const struct kernel_symbol __stop___ksymtab[];
 217extern const struct kernel_symbol __start___ksymtab_gpl[];
 218extern const struct kernel_symbol __stop___ksymtab_gpl[];
 219extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 220extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
 221extern const unsigned long __start___kcrctab[];
 222extern const unsigned long __start___kcrctab_gpl[];
 223extern const unsigned long __start___kcrctab_gpl_future[];
 224#ifdef CONFIG_UNUSED_SYMBOLS
 225extern const struct kernel_symbol __start___ksymtab_unused[];
 226extern const struct kernel_symbol __stop___ksymtab_unused[];
 227extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
 228extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
 229extern const unsigned long __start___kcrctab_unused[];
 230extern const unsigned long __start___kcrctab_unused_gpl[];
 231#endif
 232
 233#ifndef CONFIG_MODVERSIONS
 234#define symversion(base, idx) NULL
 235#else
 236#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
 237#endif
 238
 239static bool each_symbol_in_section(const struct symsearch *arr,
 240				   unsigned int arrsize,
 241				   struct module *owner,
 242				   bool (*fn)(const struct symsearch *syms,
 243					      struct module *owner,
 244					      void *data),
 245				   void *data)
 246{
 247	unsigned int j;
 248
 249	for (j = 0; j < arrsize; j++) {
 250		if (fn(&arr[j], owner, data))
 251			return true;
 252	}
 253
 254	return false;
 255}
 256
 257/* Returns true as soon as fn returns true, otherwise false. */
 258bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 259				    struct module *owner,
 260				    void *data),
 261			 void *data)
 262{
 263	struct module *mod;
 264	static const struct symsearch arr[] = {
 265		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
 266		  NOT_GPL_ONLY, false },
 267		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
 268		  __start___kcrctab_gpl,
 269		  GPL_ONLY, false },
 270		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
 271		  __start___kcrctab_gpl_future,
 272		  WILL_BE_GPL_ONLY, false },
 273#ifdef CONFIG_UNUSED_SYMBOLS
 274		{ __start___ksymtab_unused, __stop___ksymtab_unused,
 275		  __start___kcrctab_unused,
 276		  NOT_GPL_ONLY, true },
 277		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
 278		  __start___kcrctab_unused_gpl,
 279		  GPL_ONLY, true },
 280#endif
 281	};
 282
 
 
 283	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
 284		return true;
 285
 286	list_for_each_entry_rcu(mod, &modules, list) {
 
 287		struct symsearch arr[] = {
 288			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
 289			  NOT_GPL_ONLY, false },
 290			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
 291			  mod->gpl_crcs,
 292			  GPL_ONLY, false },
 293			{ mod->gpl_future_syms,
 294			  mod->gpl_future_syms + mod->num_gpl_future_syms,
 295			  mod->gpl_future_crcs,
 296			  WILL_BE_GPL_ONLY, false },
 297#ifdef CONFIG_UNUSED_SYMBOLS
 298			{ mod->unused_syms,
 299			  mod->unused_syms + mod->num_unused_syms,
 300			  mod->unused_crcs,
 301			  NOT_GPL_ONLY, true },
 302			{ mod->unused_gpl_syms,
 303			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
 304			  mod->unused_gpl_crcs,
 305			  GPL_ONLY, true },
 306#endif
 307		};
 308
 
 
 
 309		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
 310			return true;
 311	}
 312	return false;
 313}
 314EXPORT_SYMBOL_GPL(each_symbol_section);
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
				 struct module *owner,
				 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			printk(KERN_WARNING "Symbol %s is being used "
			       "by a non-GPL module, which will not "
			       "be allowed in the future\n", fsa->name);
			printk(KERN_WARNING "Please see the file "
			       "Documentation/feature-removal-schedule.txt "
			       "in the kernel source tree for more details.\n");
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
		       "however this module is using it.\n", fsa->name);
		printk(KERN_WARNING
		       "This symbol will go away in the future.\n");
		printk(KERN_WARNING
		       "Please evaluate if this is the right api to use and "
		       "if it really is, submit a report to the linux kernel "
		       "mailing list together with submitting your code for "
		       "inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}

static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}
static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}
/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	DEBUGP("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
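/*
 * Illustrative call (not from the original file): with module_mutex
 * held or preemption disabled, a lookup might read
 *
 *	sym = find_symbol("printk", &owner, &crc, true, true);
 *
 * where owner comes back NULL for symbols exported by the core kernel
 * itself, since the builtin tables are searched with a NULL owner.
 */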
/* Search for module by name: must hold module_mutex. */
struct module *find_module(const char *name)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (strcmp(mod->name, name) == 0)
			return mod;
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod,
			   unsigned long size, unsigned long align)
{
	if (align > PAGE_SIZE) {
		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
		       mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(size, align);
	if (!mod->percpu) {
		printk(KERN_WARNING
		       "%s: Could not allocate %lu bytes percpu data\n",
		       mod->name, size);
		return -ENOMEM;
	}
	mod->percpu_size = size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}
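/*
 * Summary of the SMP path above: a module's DEFINE_PER_CPU variables
 * land in its ".data..percpu" section; percpu_modalloc() carves a
 * per-CPU chunk out of the reserved percpu area, percpu_modcopy()
 * replicates the section's initial image to every possible CPU, and
 * is_module_percpu_address() reports whether an address falls inside
 * any module's chunk.
 */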
#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static inline int percpu_modalloc(struct module *mod,
				  unsigned long size, unsigned long align)
{
	return -ENOMEM;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return sprintf(buffer, "%s\n", mk->mod->field);               \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);

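/*
 * Each invocation above stamps out the full attribute boilerplate:
 * MODINFO_ATTR(version), for instance, defines setup_modinfo_version(),
 * show_modinfo_version(), modinfo_version_exists(),
 * free_modinfo_version() and the modinfo_version attribute, which
 * surfaces as /sys/module/<name>/version once sysfs is populated.
 */
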
static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	mod->refptr = alloc_percpu(struct module_ref);
	if (!mod->refptr)
		return -ENOMEM;

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	__this_cpu_write(mod->refptr->incs, 1);
	/* Backwards compatibility macros put refcount during init. */
	mod->waiter = current;

	return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			DEBUGP("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	DEBUGP("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	DEBUGP("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		printk(KERN_WARNING "%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}

/* Module a uses b: caller needs module_mutex() */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);
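/*
 * Concrete example of the bookkeeping above (illustrative): if foo.ko
 * resolves a symbol exported by bar.ko, ref_module(foo, bar) takes a
 * reference on bar and links a module_use node into bar->source_list
 * and foo->target_list, so unloading bar is refused while foo remains.
 */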
/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		DEBUGP("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);

	free_percpu(mod->refptr);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

struct stopref
{
	struct module *mod;
	int flags;
	int *forced;
};

/* Whole machine is stopped with interrupts off when this runs. */
static int __try_stop_module(void *_sref)
{
	struct stopref *sref = _sref;

	/* If it's not unused, quit unless we're forcing. */
	if (module_refcount(sref->mod) != 0) {
		if (!(*sref->forced = try_force_unload(sref->flags)))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	sref->mod->state = MODULE_STATE_GOING;
	return 0;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	if (flags & O_NONBLOCK) {
		struct stopref sref = { mod, flags, forced };

		return stop_machine(__try_stop_module, &sref, NULL);
	} else {
		/* We don't need to stop the machine for this. */
		mod->state = MODULE_STATE_GOING;
		synchronize_sched();
		return 0;
	}
}

unsigned int module_refcount(struct module *mod)
{
	unsigned int incs = 0, decs = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
	/*
	 * ensure the incs are added up after the decs.
	 * module_put ensures incs are visible before decs with smp_wmb.
	 *
	 * This 2-count scheme avoids the situation where the refcount
	 * for CPU0 is read, then CPU0 increments the module refcount,
	 * then CPU1 drops that refcount, then the refcount for CPU1 is
	 * read. We would record a decrement but not its corresponding
	 * increment so we would see a low count (disaster).
	 *
	 * Rare situation? But module_refcount can be preempted, and we
	 * might be tallying up 4096+ CPUs. So it is not impossible.
	 */
	smp_rmb();
	for_each_possible_cpu(cpu)
		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
	return incs - decs;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

static void wait_for_zero_refcount(struct module *mod)
{
	/* Since we might sleep for some time, release the mutex first */
	mutex_unlock(&module_mutex);
	for (;;) {
		DEBUGP("Looking at refcount...\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (module_refcount(mod) == 0)
			break;
		schedule();
	}
	current->state = TASK_RUNNING;
	mutex_lock(&module_mutex);
}

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count and wake up
                   waiter --RR */
		DEBUGP("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Set this up before setting mod->state */
	mod->waiter = current;

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	/* Never wait if forced. */
	if (!forced && module_refcount(mod) != 0)
		wait_for_zero_refcount(mod);

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
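/*
 * Note on the flags: the fcntl values double as unload options here.
 * O_NONBLOCK requests a non-blocking attempt via stop_machine();
 * O_TRUNC asks for a forced unload, honoured only under
 * CONFIG_MODULE_FORCE_UNLOAD (see try_force_unload() above).  Userspace
 * rmmod typically passes O_NONBLOCK, adding O_TRUNC for "rmmod -f".
 */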
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %u ", module_refcount(mod));

	/* Always include a trailing , so userspace can differentiate
           between this and the old multi-field proc format. */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_printf(m, "[permanent],");
	}

	if (!printed_something)
		seq_printf(m, "-");
}

void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/* module_text_address is safe here: we're supposed to have reference
	 * to module from symbol_get, so it can't go away. */
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", module_refcount(mk->mod));
}

static struct module_attribute refcnt = {
	.attr = { .name = "refcnt", .mode = 0444 },
	.show = show_refcnt,
};

void module_put(struct module *module)
{
	if (module) {
		preempt_disable();
		smp_wmb(); /* see comment in module_refcount */
		__this_cpu_inc(module->refptr->decs);

		trace_module_put(module, _RET_IP_);
		/* Maybe they're waiting for us to drop reference? */
		if (unlikely(!module_is_live(module)))
			wake_up_process(module->waiter);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);
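/*
 * Usage sketch (illustrative): the canonical pairing is
 *
 *	if (try_module_get(owner)) {
 *		... use owner's code and data ...
 *		module_put(owner);
 *	}
 *
 * module_put() only bumps the per-cpu "decs" counter; module_refcount()
 * reconciles incs - decs when someone actually needs the total.
 */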
#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_printf(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute initstate = {
	.attr = { .name = "initstate", .mode = 0444 },
	.show = show_initstate,
};

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	enum kobject_action action;

	if (kobject_action_type(buffer, count, &action) == 0)
		kobject_uevent(&mk->kobj, action);
	return count;
}

struct module_attribute module_uevent = {
	.attr = { .name = "uevent", .mode = 0200 },
	.store = store_uevent,
};

static struct module_attribute *modinfo_attrs[] = {
	&modinfo_version,
	&modinfo_srcversion,
	&initstate,
	&module_uevent,
#ifdef CONFIG_MODULE_UNLOAD
	&refcnt,
#endif
	NULL,
};

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		printk(KERN_WARNING "%s: %s: kernel tainted.\n",
		       mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE);
	return 0;
#else
	return -ENOEXEC;
#endif
}
#ifdef CONFIG_MODVERSIONS
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	if (crc_owner == NULL)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}

static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc,
			 const struct module *crc_owner)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
			return 1;
		DEBUGP("Found checksum %lX vs module %lX\n",
		       maybe_relocated(*crc, crc_owner), versions[i].crc);
		goto bad_version;
	}

	printk(KERN_WARNING "%s: no symbol version for %s\n",
	       mod->name, symname);
	return 0;

bad_version:
	printk("%s: disagrees about version of symbol %s\n",
	       mod->name, symname);
	return 0;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/* Since this should be found in kernel (which can't be removed),
	 * no locking is necessary. */
	if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
			 &crc, true, false))
		BUG();
	return check_version(sechdrs, versindex, "module_layout", mod, crc,
			     NULL);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
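/*
 * Example (illustrative): a module built for this kernel might carry
 * vermagic "3.1.0 SMP mod_unload ".  With CRCs available the leading
 * kernel-release token is skipped, so only the feature flags after the
 * first space must match; without CRCs the whole string must be equal.
 */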
#else
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc,
				const struct module *crc_owner)
{
	return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */

/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	mutex_lock(&module_mutex);
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
		       mod->name, owner);
	}
	return ksym;
}
/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr
{
	struct module_attribute mattr;
	char *name;
	unsigned long address;
};

struct module_sect_attrs
{
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};

static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module_kobject *mk, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].name);
	kfree(sect_attrs);
}

static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
					GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
  out:
	free_sect_attrs(sect_attrs);
}
static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[0];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}

static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

  out:
	free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}
#else

static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */

static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}

static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	struct module_attribute *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
					(ARRAY_SIZE(modinfo_attrs) + 1)),
					GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
		if (!attr->test ||
		    (attr->test && attr->test(mod))) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj, &temp_attr->attr);
			++temp_attr;
		}
	}
	return error;
}

static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}
static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		printk(KERN_ERR "%s: module sysfs not initialized\n",
		       mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		printk(KERN_ERR "%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		kobject_put(&mod->mkobj.kobj);

	/* delay uevent until full sysfs population */
out:
	return err;
}

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	kobject_put(&mod->mkobj.kobj);
out:
	return err;
}
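/*
 * Net effect: after mod_sysfs_setup() a loaded module "foo" owns a
 * sysfs subtree along the lines of (exact entries vary with config):
 *
 *	/sys/module/foo/
 *		initstate  refcnt  uevent  version  srcversion
 *		holders/  parameters/  sections/  notes/
 *
 * and the KOBJ_ADD uevent fires only once the tree is fully populated.
 */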
static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	kobject_put(&mod->mkobj.kobj);
}

#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}

/*
 * unlink the module while the whole machine is stopped with interrupts off
 * - this defends against kallsyms not taking locks
 */
static int __unlink_module(void *_mod)
{
	struct module *mod = _mod;
	list_del(&mod->list);
	module_bug_cleanup(mod);
	return 0;
}

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 */
void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
{
	unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
	unsigned long end_pfn = PFN_DOWN((unsigned long)end);

	if (end_pfn > begin_pfn)
		set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
}

static void set_section_ro_nx(void *base,
			unsigned long text_size,
			unsigned long ro_size,
			unsigned long total_size)
{
	/* begin and end PFNs of the current subsection */
	unsigned long begin_pfn;
	unsigned long end_pfn;

	/*
	 * Set RO for module text and RO-data:
	 * - Always protect first page.
	 * - Do not protect last partial page.
	 */
	if (ro_size > 0)
		set_page_attributes(base, base + ro_size, set_memory_ro);

	/*
	 * Set NX permissions for module data:
	 * - Do not protect first partial page.
	 * - Always protect last page.
	 */
	if (total_size > text_size) {
		begin_pfn = PFN_UP((unsigned long)base + text_size);
		end_pfn = PFN_UP((unsigned long)base + total_size);
		if (end_pfn > begin_pfn)
			set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
	}
}
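/*
 * Worked example (illustrative, assuming 4 KiB pages): with
 * text_size = 0x3000, ro_size = 0x4800 and total_size = 0x6000,
 * pages 0-3 become read-only (the half-used page at 0x4000 stays
 * writable) and pages 3-5 become non-executable (everything from the
 * first page past the text onward).
 */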
static void unset_module_core_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_core + mod->core_text_size,
		mod->module_core + mod->core_size,
		set_memory_x);
	set_page_attributes(mod->module_core,
		mod->module_core + mod->core_ro_size,
		set_memory_rw);
}

static void unset_module_init_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_init + mod->init_text_size,
		mod->module_init + mod->init_size,
		set_memory_x);
	set_page_attributes(mod->module_init,
		mod->module_init + mod->init_ro_size,
		set_memory_rw);
}

/* Iterate through all modules and set each module's text as RW */
void set_all_modules_text_rw(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_rw);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_rw);
		}
	}
	mutex_unlock(&module_mutex);
}

/* Iterate through all modules and set each module's text as RO */
void set_all_modules_text_ro(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_ro);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_ro);
		}
	}
	mutex_unlock(&module_mutex);
}
#else
static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif

void __weak module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}

void __weak module_arch_cleanup(struct module *mod)
{
}
/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	/* Delete from various lists */
	mutex_lock(&module_mutex);
	stop_machine(__unlink_module, mod, NULL);
	mutex_unlock(&module_mutex);
	mod_sysfs_teardown(mod);

	/* Remove dynamic debug info */
	ddebug_remove_module(mod->name);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	/* This may be NULL, but that's OK */
	unset_module_init_ro_nx(mod);
	module_free(mod, mod->module_init);
	kfree(mod->args);
	percpu_modfree(mod);

	/* Free lock-classes: */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* Finally, free the core (containing the module structure) */
	unset_module_core_ro_nx(mod);
	module_free(mod, mod->module_core);

#ifdef CONFIG_MPU
	update_protections(current->mm);
#endif
}

void *__symbol_get(const char *symbol)
{
	struct module *owner;
	const struct kernel_symbol *sym;

	preempt_disable();
	sym = find_symbol(symbol, &owner, NULL, true, true);
	if (sym && strong_try_module_get(owner))
		sym = NULL;
	preempt_enable();

	return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);

/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_export_symbols(struct module *mod)
{
	unsigned int i;
	struct module *owner;
	const struct kernel_symbol *s;
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ mod->unused_syms, mod->num_unused_syms },
		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			if (find_symbol(s->name, &owner, NULL, true, false)) {
				printk(KERN_ERR
				       "%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, s->name, module_name(owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}
1819
1820/* Change all symbols so that st_value encodes the pointer directly. */
1821static int simplify_symbols(struct module *mod, const struct load_info *info)
1822{
1823	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
1824	Elf_Sym *sym = (void *)symsec->sh_addr;
1825	unsigned long secbase;
1826	unsigned int i;
1827	int ret = 0;
1828	const struct kernel_symbol *ksym;
1829
1830	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
1831		const char *name = info->strtab + sym[i].st_name;
1832
1833		switch (sym[i].st_shndx) {
1834		case SHN_COMMON:
 
 
 
 
1835			/* We compiled with -fno-common.  These are not
1836			   supposed to happen.  */
1837			DEBUGP("Common symbol: %s\n", name);
1838			printk("%s: please compile with -fno-common\n",
1839			       mod->name);
1840			ret = -ENOEXEC;
1841			break;
1842
1843		case SHN_ABS:
1844			/* Don't need to do anything */
1845			DEBUGP("Absolute symbol: 0x%08lx\n",
1846			       (long)sym[i].st_value);
1847			break;
1848
 
 
 
 
1849		case SHN_UNDEF:
1850			ksym = resolve_symbol_wait(mod, info, name);
1851			/* Ok if resolved.  */
1852			if (ksym && !IS_ERR(ksym)) {
1853				sym[i].st_value = ksym->value;
1854				break;
1855			}
1856
1857			/* Ok if weak.  */
1858			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
1859				break;
1860
1861			printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n",
1862			       mod->name, name, PTR_ERR(ksym));
1863			ret = PTR_ERR(ksym) ?: -ENOENT;
 
 
1864			break;
1865
1866		default:
1867			/* Divert to percpu allocation if a percpu var. */
1868			if (sym[i].st_shndx == info->index.pcpu)
1869				secbase = (unsigned long)mod_percpu(mod);
1870			else
1871				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
1872			sym[i].st_value += secbase;
1873			break;
1874		}
1875	}
1876
1877	return ret;
1878}
1879
1880int __weak apply_relocate(Elf_Shdr *sechdrs,
1881			  const char *strtab,
1882			  unsigned int symindex,
1883			  unsigned int relsec,
1884			  struct module *me)
1885{
1886	pr_err("module %s: REL relocation unsupported\n", me->name);
1887	return -ENOEXEC;
1888}
1889
1890int __weak apply_relocate_add(Elf_Shdr *sechdrs,
1891			      const char *strtab,
1892			      unsigned int symindex,
1893			      unsigned int relsec,
1894			      struct module *me)
1895{
1896	pr_err("module %s: RELA relocation unsupported\n", me->name);
1897	return -ENOEXEC;
1898}
1899
1900static int apply_relocations(struct module *mod, const struct load_info *info)
1901{
1902	unsigned int i;
1903	int err = 0;
1904
1905	/* Now do relocations. */
1906	for (i = 1; i < info->hdr->e_shnum; i++) {
1907		unsigned int infosec = info->sechdrs[i].sh_info;
1908
1909		/* Not a valid relocation section? */
1910		if (infosec >= info->hdr->e_shnum)
1911			continue;
1912
1913		/* Don't bother with non-allocated sections */
1914		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
1915			continue;
1916
1917		if (info->sechdrs[i].sh_type == SHT_REL)
1918			err = apply_relocate(info->sechdrs, info->strtab,
1919					     info->index.sym, i, mod);
1920		else if (info->sechdrs[i].sh_type == SHT_RELA)
1921			err = apply_relocate_add(info->sechdrs, info->strtab,
1922						 info->index.sym, i, mod);
1923		if (err < 0)
1924			break;
1925	}
1926	return err;
1927}
1928
1929/* Additional bytes needed by arch in front of individual sections */
1930unsigned int __weak arch_mod_section_prepend(struct module *mod,
1931					     unsigned int section)
1932{
1933	/* default implementation just returns zero */
1934	return 0;
1935}
1936
1937/* Update size with this section: return offset. */
1938static long get_offset(struct module *mod, unsigned int *size,
1939		       Elf_Shdr *sechdr, unsigned int section)
1940{
1941	long ret;
1942
1943	*size += arch_mod_section_prepend(mod, section);
1944	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
1945	*size = ret + sechdr->sh_size;
1946	return ret;
1947}
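/*
 * Worked example (illustrative, assuming arch_mod_section_prepend()
 * returns 0): with *size == 10 and a section where sh_addralign == 8 and
 * sh_size == 0x30, get_offset() returns ALIGN(10, 8) == 16 and leaves
 * *size == 16 + 0x30 == 64.
 */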
1948
1949/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
1950   might -- code, read-only data, read-write data, small data.  Tally
1951   sizes, and place the offsets into sh_entsize fields: high bit means it
1952   belongs in init. */
1953static void layout_sections(struct module *mod, struct load_info *info)
1954{
1955	static unsigned long const masks[][2] = {
1956		/* NOTE: all executable code must be the first section
1957		 * in this array; otherwise modify the text_size
1958		 * finder in the two loops below */
1959		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
1960		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
1961		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
1962		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
1963	};
1964	unsigned int m, i;
1965
1966	for (i = 0; i < info->hdr->e_shnum; i++)
1967		info->sechdrs[i].sh_entsize = ~0UL;
1968
1969	DEBUGP("Core section allocation order:\n");
1970	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
1971		for (i = 0; i < info->hdr->e_shnum; ++i) {
1972			Elf_Shdr *s = &info->sechdrs[i];
1973			const char *sname = info->secstrings + s->sh_name;
1974
1975			if ((s->sh_flags & masks[m][0]) != masks[m][0]
1976			    || (s->sh_flags & masks[m][1])
1977			    || s->sh_entsize != ~0UL
1978			    || strstarts(sname, ".init"))
1979				continue;
1980			s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
1981			DEBUGP("\t%s\n", sname);
1982		}
1983		switch (m) {
1984		case 0: /* executable */
1985			mod->core_size = debug_align(mod->core_size);
1986			mod->core_text_size = mod->core_size;
1987			break;
1988		case 1: /* RO: text and ro-data */
1989			mod->core_size = debug_align(mod->core_size);
1990			mod->core_ro_size = mod->core_size;
1991			break;
1992		case 3: /* whole core */
1993			mod->core_size = debug_align(mod->core_size);
1994			break;
1995		}
1996	}
1997
1998	DEBUGP("Init section allocation order:\n");
1999	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2000		for (i = 0; i < info->hdr->e_shnum; ++i) {
2001			Elf_Shdr *s = &info->sechdrs[i];
2002			const char *sname = info->secstrings + s->sh_name;
2003
2004			if ((s->sh_flags & masks[m][0]) != masks[m][0]
2005			    || (s->sh_flags & masks[m][1])
2006			    || s->sh_entsize != ~0UL
2007			    || !strstarts(sname, ".init"))
2008				continue;
2009			s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
2010					 | INIT_OFFSET_MASK);
2011			DEBUGP("\t%s\n", sname);
2012		}
2013		switch (m) {
2014		case 0: /* executable */
2015			mod->init_size = debug_align(mod->init_size);
2016			mod->init_text_size = mod->init_size;
2017			break;
2018		case 1: /* RO: text and ro-data */
2019			mod->init_size = debug_align(mod->init_size);
2020			mod->init_ro_size = mod->init_size;
2021			break;
2022		case 3: /* whole init */
2023			mod->init_size = debug_align(mod->init_size);
2024			break;
2025		}
2026	}
2027}
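/*
 * Illustrative layout (hypothetical section sizes, debug_align() padding
 * ignored): a module with .text (0x1000), .rodata (0x200) and .data
 * (0x100) gets core offsets 0x0, 0x1000 and 0x1200 respectively, so
 * core_text_size == 0x1000, core_ro_size == 0x1200 and
 * core_size == 0x1300.
 */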
2028
2029static void set_license(struct module *mod, const char *license)
2030{
2031	if (!license)
2032		license = "unspecified";
2033
2034	if (!license_is_gpl_compatible(license)) {
2035		if (!test_taint(TAINT_PROPRIETARY_MODULE))
2036			printk(KERN_WARNING "%s: module license '%s' taints "
2037				"kernel.\n", mod->name, license);
2038		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
2039	}
2040}
2041
2042/* Parse tag=value strings from .modinfo section */
2043static char *next_string(char *string, unsigned long *secsize)
2044{
2045	/* Skip non-zero chars */
2046	while (string[0]) {
2047		string++;
2048		if ((*secsize)-- <= 1)
2049			return NULL;
2050	}
2051
2052	/* Skip any zero padding. */
2053	while (!string[0]) {
2054		string++;
2055		if ((*secsize)-- <= 1)
2056			return NULL;
2057	}
2058	return string;
2059}
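/*
 * Illustrative .modinfo contents: the section is a sequence of
 * NUL-terminated "tag=value" strings, possibly with zero padding between
 * them, e.g. (author name hypothetical):
 *
 *	"license=GPL\0author=Jane Doe\0\0vermagic=3.1.0 SMP mod_unload\0"
 *
 * next_string() steps over one string plus any padding and returns NULL
 * once the section is exhausted.
 */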
2060
2061static char *get_modinfo(struct load_info *info, const char *tag)
2062{
2063	char *p;
2064	unsigned int taglen = strlen(tag);
2065	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2066	unsigned long size = infosec->sh_size;
2067
2068	for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2069		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2070			return p + taglen + 1;
2071	}
2072	return NULL;
2073}
2074
2075static void setup_modinfo(struct module *mod, struct load_info *info)
2076{
2077	struct module_attribute *attr;
2078	int i;
2079
2080	for (i = 0; (attr = modinfo_attrs[i]); i++) {
2081		if (attr->setup)
2082			attr->setup(mod, get_modinfo(info, attr->attr.name));
2083	}
2084}
2085
2086static void free_modinfo(struct module *mod)
2087{
2088	struct module_attribute *attr;
2089	int i;
2090
2091	for (i = 0; (attr = modinfo_attrs[i]); i++) {
2092		if (attr->free)
2093			attr->free(mod);
2094	}
2095}
2096
2097#ifdef CONFIG_KALLSYMS
2098
2099/* lookup symbol in given range of kernel_symbols */
2100static const struct kernel_symbol *lookup_symbol(const char *name,
2101	const struct kernel_symbol *start,
2102	const struct kernel_symbol *stop)
2103{
2104	return bsearch(name, start, stop - start,
2105			sizeof(struct kernel_symbol), cmp_name);
2106}
2107
2108static int is_exported(const char *name, unsigned long value,
2109		       const struct module *mod)
2110{
2111	const struct kernel_symbol *ks;
2112	if (!mod)
2113		ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2114	else
2115		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2116	return ks != NULL && ks->value == value;
2117}
2118
2119/* As per nm */
2120static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2121{
2122	const Elf_Shdr *sechdrs = info->sechdrs;
2123
2124	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2125		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2126			return 'v';
2127		else
2128			return 'w';
2129	}
2130	if (sym->st_shndx == SHN_UNDEF)
2131		return 'U';
2132	if (sym->st_shndx == SHN_ABS)
2133		return 'a';
2134	if (sym->st_shndx >= SHN_LORESERVE)
2135		return '?';
2136	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2137		return 't';
2138	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2139	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2140		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2141			return 'r';
2142		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2143			return 'g';
2144		else
2145			return 'd';
2146	}
2147	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2148		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2149			return 's';
2150		else
2151			return 'b';
2152	}
2153	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2154		      ".debug")) {
2155		return 'n';
2156	}
2157	return '?';
2158}
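/*
 * Example (illustrative): a symbol defined in the module's .text yields
 * 't', one in .rodata yields 'r', one in .bss yields 'b', and an
 * unresolved reference yields 'U' -- mirroring nm(1)'s type letters.
 */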
2159
2160static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2161                           unsigned int shnum)
2162{
2163	const Elf_Shdr *sec;
2164
2165	if (src->st_shndx == SHN_UNDEF
2166	    || src->st_shndx >= shnum
2167	    || !src->st_name)
2168		return false;
2169
2170	sec = sechdrs + src->st_shndx;
2171	if (!(sec->sh_flags & SHF_ALLOC)
2172#ifndef CONFIG_KALLSYMS_ALL
2173	    || !(sec->sh_flags & SHF_EXECINSTR)
2174#endif
2175	    || (sec->sh_entsize & INIT_OFFSET_MASK))
2176		return false;
2177
2178	return true;
2179}
2180
2181static void layout_symtab(struct module *mod, struct load_info *info)
2182{
2183	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2184	Elf_Shdr *strsect = info->sechdrs + info->index.str;
2185	const Elf_Sym *src;
2186	unsigned int i, nsrc, ndst;
2187
2188	/* Put symbol section at end of init part of module. */
2189	symsect->sh_flags |= SHF_ALLOC;
2190	symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2191					 info->index.sym) | INIT_OFFSET_MASK;
2192	DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
2193
2194	src = (void *)info->hdr + symsect->sh_offset;
2195	nsrc = symsect->sh_size / sizeof(*src);
2196	for (ndst = i = 1; i < nsrc; ++i, ++src)
2197		if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) {
2198			unsigned int j = src->st_name;
2199
2200			while (!__test_and_set_bit(j, info->strmap)
2201			       && info->strtab[j])
2202				++j;
2203			++ndst;
2204		}
2205
2206	/* Append room for core symbols at end of core part. */
2207	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2208	mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2209
2210	/* Put string table section at end of init part of module. */
2211	strsect->sh_flags |= SHF_ALLOC;
2212	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2213					 info->index.str) | INIT_OFFSET_MASK;
2214	DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
2215
2216	/* Append room for core symbols' strings at end of core part. */
2217	info->stroffs = mod->core_size;
2218	__set_bit(0, info->strmap);
2219	mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
2220}
2221
2222static void add_kallsyms(struct module *mod, const struct load_info *info)
2223{
2224	unsigned int i, ndst;
2225	const Elf_Sym *src;
2226	Elf_Sym *dst;
2227	char *s;
2228	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2229
2230	mod->symtab = (void *)symsec->sh_addr;
2231	mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2232	/* Make sure we get permanent strtab: don't use info->strtab. */
2233	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2234
2235	/* Set types up while we still have access to sections. */
2236	for (i = 0; i < mod->num_symtab; i++)
2237		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2238
2239	mod->core_symtab = dst = mod->module_core + info->symoffs;
2240	src = mod->symtab;
2241	*dst = *src;
2242	for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
2243		if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum))
2244			continue;
2245		dst[ndst] = *src;
2246		dst[ndst].st_name = bitmap_weight(info->strmap,
2247						  dst[ndst].st_name);
2248		++ndst;
2249	}
2250	mod->core_num_syms = ndst;
2251
2252	mod->core_strtab = s = mod->module_core + info->stroffs;
2253	for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
2254		if (test_bit(i, info->strmap))
2255			*++s = mod->strtab[i];
2256}
2257#else
2258static inline void layout_symtab(struct module *mod, struct load_info *info)
2259{
2260}
2261
2262static void add_kallsyms(struct module *mod, const struct load_info *info)
2263{
2264}
2265#endif /* CONFIG_KALLSYMS */
2266
2267static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2268{
2269	if (!debug)
2270		return;
2271#ifdef CONFIG_DYNAMIC_DEBUG
2272	if (ddebug_add_module(debug, num, debug->modname))
2273		printk(KERN_ERR "dynamic debug error adding module: %s\n",
2274					debug->modname);
2275#endif
2276}
2277
2278static void dynamic_debug_remove(struct _ddebug *debug)
2279{
2280	if (debug)
2281		ddebug_remove_module(debug->modname);
2282}
2283
2284void * __weak module_alloc(unsigned long size)
2285{
2286	return size == 0 ? NULL : vmalloc_exec(size);
2287}
2288
2289static void *module_alloc_update_bounds(unsigned long size)
2290{
2291	void *ret = module_alloc(size);
2292
2293	if (ret) {
2294		mutex_lock(&module_mutex);
2295		/* Update module bounds. */
2296		if ((unsigned long)ret < module_addr_min)
2297			module_addr_min = (unsigned long)ret;
2298		if ((unsigned long)ret + size > module_addr_max)
2299			module_addr_max = (unsigned long)ret + size;
2300		mutex_unlock(&module_mutex);
2301	}
2302	return ret;
2303}
2304
2305#ifdef CONFIG_DEBUG_KMEMLEAK
2306static void kmemleak_load_module(const struct module *mod,
2307				 const struct load_info *info)
2308{
2309	unsigned int i;
2310
2311	/* only scan the sections containing data */
2312	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2313
2314	for (i = 1; i < info->hdr->e_shnum; i++) {
2315		const char *name = info->secstrings + info->sechdrs[i].sh_name;
2316		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC))
2317			continue;
2318		if (!strstarts(name, ".data") && !strstarts(name, ".bss"))
2319			continue;
2320
2321		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2322				   info->sechdrs[i].sh_size, GFP_KERNEL);
2323	}
2324}
2325#else
2326static inline void kmemleak_load_module(const struct module *mod,
2327					const struct load_info *info)
2328{
2329}
2330#endif
2331
2332/* Sets info->hdr and info->len. */
2333static int copy_and_check(struct load_info *info,
2334			  const void __user *umod, unsigned long len,
2335			  const char __user *uargs)
2336{
2337	int err;
2338	Elf_Ehdr *hdr;
2339
2340	if (len < sizeof(*hdr))
2341		return -ENOEXEC;
2342
2343	/* Suck in entire file: we'll want most of it. */
2344	/* vmalloc barfs on "unusual" numbers.  Check here */
2345	if (len > 64 * 1024 * 1024 || (hdr = vmalloc(len)) == NULL)
2346		return -ENOMEM;
2347
2348	if (copy_from_user(hdr, umod, len) != 0) {
2349		err = -EFAULT;
2350		goto free_hdr;
2351	}
2352
2353	/* Sanity checks against insmod'ing plain binaries, a wrong arch
2354	   or a weird ELF version */
2355	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
2356	    || hdr->e_type != ET_REL
2357	    || !elf_check_arch(hdr)
2358	    || hdr->e_shentsize != sizeof(Elf_Shdr)) {
2359		err = -ENOEXEC;
2360		goto free_hdr;
2361	}
2362
2363	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
2364		err = -ENOEXEC;
2365		goto free_hdr;
2366	}
2367
2368	info->hdr = hdr;
2369	info->len = len;
2370	return 0;
2371
2372free_hdr:
2373	vfree(hdr);
2374	return err;
2375}
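/*
 * Illustrative cross-check (not part of this file): "readelf -h foo.ko"
 * on a valid module reports Type: REL (relocatable) and the native
 * machine, which is exactly what the sanity checks above accept.
 */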
2376
2377static void free_copy(struct load_info *info)
2378{
2379	vfree(info->hdr);
2380}
2381
2382static int rewrite_section_headers(struct load_info *info)
2383{
2384	unsigned int i;
2385
2386	/* This should always be true, but let's be sure. */
2387	info->sechdrs[0].sh_addr = 0;
2388
2389	for (i = 1; i < info->hdr->e_shnum; i++) {
2390		Elf_Shdr *shdr = &info->sechdrs[i];
2391		if (shdr->sh_type != SHT_NOBITS
2392		    && info->len < shdr->sh_offset + shdr->sh_size) {
2393			printk(KERN_ERR "Module len %lu truncated\n",
2394			       info->len);
2395			return -ENOEXEC;
2396		}
2397
2398		/* Mark each section's sh_addr with its address in the
2399		   temporary image. */
2400		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2401
2402#ifndef CONFIG_MODULE_UNLOAD
2403		/* Don't load .exit sections */
2404		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2405			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2406#endif
2407	}
2408
2409	/* Track but don't keep modinfo and version sections. */
2410	info->index.vers = find_sec(info, "__versions");
2411	info->index.info = find_sec(info, ".modinfo");
2412	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2413	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2414	return 0;
2415}
2416
2417/*
2418 * Set up our basic convenience variables (pointers to section headers,
2419 * search for module section index etc), and do some basic section
2420 * verification.
2421 *
2422 * Return the temporary module pointer (we'll replace it with the final
2423 * one when we move the module sections around).
2424 */
2425static struct module *setup_load_info(struct load_info *info)
2426{
2427	unsigned int i;
2428	int err;
2429	struct module *mod;
2430
2431	/* Set up the convenience variables */
2432	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2433	info->secstrings = (void *)info->hdr
2434		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2435
2436	err = rewrite_section_headers(info);
2437	if (err)
2438		return ERR_PTR(err);
2439
2440	/* Find internal symbols and strings. */
2441	for (i = 1; i < info->hdr->e_shnum; i++) {
2442		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2443			info->index.sym = i;
2444			info->index.str = info->sechdrs[i].sh_link;
2445			info->strtab = (char *)info->hdr
2446				+ info->sechdrs[info->index.str].sh_offset;
2447			break;
2448		}
2449	}
2450
2451	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2452	if (!info->index.mod) {
2453		printk(KERN_WARNING "No module found in object\n");
2454		return ERR_PTR(-ENOEXEC);
2455	}
2456	/* This is temporary: point mod into copy of data. */
2457	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2458
2459	if (info->index.sym == 0) {
2460		printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
2461		       mod->name);
2462		return ERR_PTR(-ENOEXEC);
2463	}
2464
2465	info->index.pcpu = find_pcpusec(info);
2466
2467	/* Check module struct version now, before we try to use module. */
2468	if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2469		return ERR_PTR(-ENOEXEC);
2470
2471	return mod;
2472}
2473
2474static int check_modinfo(struct module *mod, struct load_info *info)
2475{
2476	const char *modmagic = get_modinfo(info, "vermagic");
2477	int err;
2478
2479	/* This is allowed: modprobe --force will invalidate it. */
2480	if (!modmagic) {
2481		err = try_to_force_load(mod, "bad vermagic");
2482		if (err)
2483			return err;
2484	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2485		printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
2486		       mod->name, modmagic, vermagic);
2487		return -ENOEXEC;
2488	}
2489
2490	if (get_modinfo(info, "staging")) {
2491		add_taint_module(mod, TAINT_CRAP);
2492		printk(KERN_WARNING "%s: module is from the staging directory,"
2493		       " the quality is unknown, you have been warned.\n",
2494		       mod->name);
2495	}
2496
2497	/* Set up license info based on the info section */
2498	set_license(mod, get_modinfo(info, "license"));
2499
2500	return 0;
2501}
2502
2503static void find_module_sections(struct module *mod, struct load_info *info)
2504{
2505	mod->kp = section_objs(info, "__param",
2506			       sizeof(*mod->kp), &mod->num_kp);
2507	mod->syms = section_objs(info, "__ksymtab",
2508				 sizeof(*mod->syms), &mod->num_syms);
2509	mod->crcs = section_addr(info, "__kcrctab");
2510	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2511				     sizeof(*mod->gpl_syms),
2512				     &mod->num_gpl_syms);
2513	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2514	mod->gpl_future_syms = section_objs(info,
2515					    "__ksymtab_gpl_future",
2516					    sizeof(*mod->gpl_future_syms),
2517					    &mod->num_gpl_future_syms);
2518	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2519
2520#ifdef CONFIG_UNUSED_SYMBOLS
2521	mod->unused_syms = section_objs(info, "__ksymtab_unused",
2522					sizeof(*mod->unused_syms),
2523					&mod->num_unused_syms);
2524	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2525	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2526					    sizeof(*mod->unused_gpl_syms),
2527					    &mod->num_unused_gpl_syms);
2528	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2529#endif
2530#ifdef CONFIG_CONSTRUCTORS
2531	mod->ctors = section_objs(info, ".ctors",
2532				  sizeof(*mod->ctors), &mod->num_ctors);
2533#endif
2534
2535#ifdef CONFIG_TRACEPOINTS
2536	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2537					     sizeof(*mod->tracepoints_ptrs),
2538					     &mod->num_tracepoints);
2539#endif
2540#ifdef HAVE_JUMP_LABEL
2541	mod->jump_entries = section_objs(info, "__jump_table",
2542					sizeof(*mod->jump_entries),
2543					&mod->num_jump_entries);
2544#endif
2545#ifdef CONFIG_EVENT_TRACING
2546	mod->trace_events = section_objs(info, "_ftrace_events",
2547					 sizeof(*mod->trace_events),
2548					 &mod->num_trace_events);
2549	/*
2550	 * This section contains pointers to allocated objects in the trace
2551	 * code and not scanning it leads to false positives.
2552	 */
2553	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
2554			   mod->num_trace_events, GFP_KERNEL);
2555#endif
2556#ifdef CONFIG_TRACING
2557	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2558					 sizeof(*mod->trace_bprintk_fmt_start),
2559					 &mod->num_trace_bprintk_fmt);
2560	/*
2561	 * This section contains pointers to allocated objects in the trace
2562	 * code and not scanning it leads to false positives.
2563	 */
2564	kmemleak_scan_area(mod->trace_bprintk_fmt_start,
2565			   sizeof(*mod->trace_bprintk_fmt_start) *
2566			   mod->num_trace_bprintk_fmt, GFP_KERNEL);
2567#endif
2568#ifdef CONFIG_FTRACE_MCOUNT_RECORD
2569	/* sechdrs[0].sh_size is always zero */
2570	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2571					     sizeof(*mod->ftrace_callsites),
2572					     &mod->num_ftrace_callsites);
2573#endif
2574
2575	mod->extable = section_objs(info, "__ex_table",
2576				    sizeof(*mod->extable), &mod->num_exentries);
2577
2578	if (section_addr(info, "__obsparm"))
2579		printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
2580		       mod->name);
2581
2582	info->debug = section_objs(info, "__verbose",
2583				   sizeof(*info->debug), &info->num_debug);
2584}
2585
2586static int move_module(struct module *mod, struct load_info *info)
2587{
2588	int i;
2589	void *ptr;
2590
2591	/* Do the allocs. */
2592	ptr = module_alloc_update_bounds(mod->core_size);
2593	/*
2594	 * The pointer to this block is stored in the module structure
2595	 * which is inside the block. Just mark it as not being a
2596	 * leak.
2597	 */
2598	kmemleak_not_leak(ptr);
2599	if (!ptr)
2600		return -ENOMEM;
2601
2602	memset(ptr, 0, mod->core_size);
2603	mod->module_core = ptr;
2604
2605	ptr = module_alloc_update_bounds(mod->init_size);
2606	/*
2607	 * The pointer to this block is stored in the module structure
2608	 * which is inside the block. This block doesn't need to be
2609	 * scanned as it contains data and code that will be freed
2610	 * after the module is initialized.
2611	 */
2612	kmemleak_ignore(ptr);
2613	if (!ptr && mod->init_size) {
2614		module_free(mod, mod->module_core);
2615		return -ENOMEM;
2616	}
2617	memset(ptr, 0, mod->init_size);
2618	mod->module_init = ptr;
2619
2620	/* Transfer each section which specifies SHF_ALLOC */
2621	DEBUGP("final section addresses:\n");
2622	for (i = 0; i < info->hdr->e_shnum; i++) {
2623		void *dest;
2624		Elf_Shdr *shdr = &info->sechdrs[i];
2625
2626		if (!(shdr->sh_flags & SHF_ALLOC))
2627			continue;
2628
2629		if (shdr->sh_entsize & INIT_OFFSET_MASK)
2630			dest = mod->module_init
2631				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
2632		else
2633			dest = mod->module_core + shdr->sh_entsize;
2634
2635		if (shdr->sh_type != SHT_NOBITS)
2636			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2637		/* Update sh_addr to point to copy in image. */
2638		shdr->sh_addr = (unsigned long)dest;
2639		DEBUGP("\t0x%lx %s\n",
2640		       shdr->sh_addr, info->secstrings + shdr->sh_name);
2641	}
2642
2643	return 0;
2644}
2645
2646static int check_module_license_and_versions(struct module *mod)
2647{
2648	/*
2649	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2650	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2651	 * using GPL-only symbols it needs.
2652	 */
2653	if (strcmp(mod->name, "ndiswrapper") == 0)
2654		add_taint(TAINT_PROPRIETARY_MODULE);
2655
2656	/* driverloader was caught wrongly pretending to be under GPL */
2657	if (strcmp(mod->name, "driverloader") == 0)
2658		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
2659
2660#ifdef CONFIG_MODVERSIONS
2661	if ((mod->num_syms && !mod->crcs)
2662	    || (mod->num_gpl_syms && !mod->gpl_crcs)
2663	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2664#ifdef CONFIG_UNUSED_SYMBOLS
2665	    || (mod->num_unused_syms && !mod->unused_crcs)
2666	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2667#endif
2668		) {
2669		return try_to_force_load(mod,
2670					 "no versions for exported symbols");
2671	}
2672#endif
2673	return 0;
2674}
2675
2676static void flush_module_icache(const struct module *mod)
2677{
2678	mm_segment_t old_fs;
2679
2680	/* flush the icache in correct context */
2681	old_fs = get_fs();
2682	set_fs(KERNEL_DS);
2683
2684	/*
2685	 * Flush the instruction cache, since we've played with text.
2686	 * Do it before processing of module parameters, so the module
2687	 * can provide parameter accessor functions of its own.
2688	 */
2689	if (mod->module_init)
2690		flush_icache_range((unsigned long)mod->module_init,
2691				   (unsigned long)mod->module_init
2692				   + mod->init_size);
2693	flush_icache_range((unsigned long)mod->module_core,
2694			   (unsigned long)mod->module_core + mod->core_size);
2695
2696	set_fs(old_fs);
2697}
2698
2699int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2700				     Elf_Shdr *sechdrs,
2701				     char *secstrings,
2702				     struct module *mod)
2703{
2704	return 0;
2705}
2706
2707static struct module *layout_and_allocate(struct load_info *info)
2708{
2709	/* Module within temporary copy. */
2710	struct module *mod;
2711	Elf_Shdr *pcpusec;
2712	int err;
2713
2714	mod = setup_load_info(info);
2715	if (IS_ERR(mod))
2716		return mod;
2717
2718	err = check_modinfo(mod, info);
2719	if (err)
2720		return ERR_PTR(err);
2721
2722	/* Allow arches to frob section contents and sizes.  */
2723	err = module_frob_arch_sections(info->hdr, info->sechdrs,
2724					info->secstrings, mod);
2725	if (err < 0)
2726		goto out;
2727
2728	pcpusec = &info->sechdrs[info->index.pcpu];
2729	if (pcpusec->sh_size) {
2730		/* We have a special allocation for this section. */
2731		err = percpu_modalloc(mod,
2732				      pcpusec->sh_size, pcpusec->sh_addralign);
2733		if (err)
2734			goto out;
2735		pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
2736	}
2737
2738	/* Determine total sizes, and put offsets in sh_entsize.  For now
2739	   this is done generically; there don't appear to be any
2740	   special cases for the architectures. */
2741	layout_sections(mod, info);
2742
2743	info->strmap = kzalloc(BITS_TO_LONGS(info->sechdrs[info->index.str].sh_size)
2744			 * sizeof(long), GFP_KERNEL);
2745	if (!info->strmap) {
2746		err = -ENOMEM;
2747		goto free_percpu;
2748	}
2749	layout_symtab(mod, info);
2750
2751	/* Allocate and move to the final place */
2752	err = move_module(mod, info);
2753	if (err)
2754		goto free_strmap;
2755
2756	/* Module has been copied to its final place now: return it. */
2757	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2758	kmemleak_load_module(mod, info);
2759	return mod;
2760
2761free_strmap:
2762	kfree(info->strmap);
2763free_percpu:
2764	percpu_modfree(mod);
2765out:
2766	return ERR_PTR(err);
2767}
2768
2769/* mod is no longer valid after this! */
2770static void module_deallocate(struct module *mod, struct load_info *info)
2771{
2772	kfree(info->strmap);
2773	percpu_modfree(mod);
2774	module_free(mod, mod->module_init);
2775	module_free(mod, mod->module_core);
2776}
2777
2778int __weak module_finalize(const Elf_Ehdr *hdr,
2779			   const Elf_Shdr *sechdrs,
2780			   struct module *me)
2781{
2782	return 0;
2783}
2784
2785static int post_relocation(struct module *mod, const struct load_info *info)
2786{
2787	/* Sort exception table now that relocations are done. */
2788	sort_extable(mod->extable, mod->extable + mod->num_exentries);
2789
2790	/* Copy relocated percpu area over. */
2791	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
2792		       info->sechdrs[info->index.pcpu].sh_size);
2793
2794	/* Setup kallsyms-specific fields. */
2795	add_kallsyms(mod, info);
2796
2797	/* Arch-specific module finalizing. */
2798	return module_finalize(info->hdr, info->sechdrs, mod);
2799}
2800
2801/* Allocate and load the module: note that size of section 0 is always
2802   zero, and we rely on this for optional sections. */
2803static struct module *load_module(void __user *umod,
2804				  unsigned long len,
2805				  const char __user *uargs)
2806{
2807	struct load_info info = { NULL, };
2808	struct module *mod;
2809	long err;
2810
2811	DEBUGP("load_module: umod=%p, len=%lu, uargs=%p\n",
2812	       umod, len, uargs);
2813
2814	/* Copy in the blobs from userspace, check they are vaguely sane. */
2815	err = copy_and_check(&info, umod, len, uargs);
2816	if (err)
2817		return ERR_PTR(err);
2818
2819	/* Figure out module layout, and allocate all the memory. */
2820	mod = layout_and_allocate(&info);
2821	if (IS_ERR(mod)) {
2822		err = PTR_ERR(mod);
2823		goto free_copy;
2824	}
2825
2826	/* Now module is in final location, initialize linked lists, etc. */
2827	err = module_unload_init(mod);
2828	if (err)
2829		goto free_module;
2830
2831	/* Now we've got everything in the final locations, we can
2832	 * find optional sections. */
2833	find_module_sections(mod, &info);
2834
2835	err = check_module_license_and_versions(mod);
2836	if (err)
2837		goto free_unload;
2838
2839	/* Set up MODINFO_ATTR fields */
2840	setup_modinfo(mod, &info);
2841
2842	/* Fix up syms, so that st_value is a pointer to location. */
2843	err = simplify_symbols(mod, &info);
2844	if (err < 0)
2845		goto free_modinfo;
2846
2847	err = apply_relocations(mod, &info);
2848	if (err < 0)
2849		goto free_modinfo;
2850
2851	err = post_relocation(mod, &info);
2852	if (err < 0)
2853		goto free_modinfo;
2854
2855	flush_module_icache(mod);
2856
2857	/* Now copy in args */
2858	mod->args = strndup_user(uargs, ~0UL >> 1);
2859	if (IS_ERR(mod->args)) {
2860		err = PTR_ERR(mod->args);
2861		goto free_arch_cleanup;
2862	}
2863
2864	/* Mark state as coming so strong_try_module_get() ignores us. */
2865	mod->state = MODULE_STATE_COMING;
2866
2867	/* Now sew it into the lists so we can get lockdep and oops
2868	 * info during argument parsing.  No one should access us, since
2869	 * strong_try_module_get() will fail.
2870	 * lockdep/oops can run asynchronously, so use the RCU list insertion
2871	 * function to insert in a way safe to concurrent readers.
2872	 * The mutex protects against concurrent writers.
2873	 */
2874	mutex_lock(&module_mutex);
2875	if (find_module(mod->name)) {
2876		err = -EEXIST;
2877		goto unlock;
2878	}
2879
2880	/* This has to be done once we're sure module name is unique. */
2881	if (!mod->taints || mod->taints == (1U<<TAINT_CRAP))
2882		dynamic_debug_setup(info.debug, info.num_debug);
2883
2884	/* Find duplicate symbols */
2885	err = verify_export_symbols(mod);
2886	if (err < 0)
2887		goto ddebug;
2888
2889	module_bug_finalize(info.hdr, info.sechdrs, mod);
2890	list_add_rcu(&mod->list, &modules);
2891	mutex_unlock(&module_mutex);
2892
2893	/* Module is ready to execute: parsing args may do that. */
2894	err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp, NULL);
2895	if (err < 0)
2896		goto unlink;
2897
2898	/* Link in to sysfs. */
2899	err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp);
2900	if (err < 0)
2901		goto unlink;
2902
2903	/* Get rid of temporary copy and strmap. */
2904	kfree(info.strmap);
2905	free_copy(&info);
2906
2907	/* Done! */
2908	trace_module_load(mod);
2909	return mod;
2910
2911 unlink:
2912	mutex_lock(&module_mutex);
2913	/* Unlink carefully: kallsyms could be walking list. */
2914	list_del_rcu(&mod->list);
2915	module_bug_cleanup(mod);
2916
2917 ddebug:
2918	if (!mod->taints || mod->taints == (1U<<TAINT_CRAP))
2919		dynamic_debug_remove(info.debug);
2920 unlock:
2921	mutex_unlock(&module_mutex);
2922	synchronize_sched();
2923	kfree(mod->args);
2924 free_arch_cleanup:
2925	module_arch_cleanup(mod);
2926 free_modinfo:
2927	free_modinfo(mod);
2928 free_unload:
2929	module_unload_free(mod);
2930 free_module:
2931	module_deallocate(mod, &info);
2932 free_copy:
2933	free_copy(&info);
2934	return ERR_PTR(err);
2935}
2936
2937/* Call module constructors. */
2938static void do_mod_ctors(struct module *mod)
2939{
2940#ifdef CONFIG_CONSTRUCTORS
2941	unsigned long i;
2942
2943	for (i = 0; i < mod->num_ctors; i++)
2944		mod->ctors[i]();
2945#endif
2946}
2947
2948/* This is where the real work happens */
2949SYSCALL_DEFINE3(init_module, void __user *, umod,
2950		unsigned long, len, const char __user *, uargs)
2951{
2952	struct module *mod;
2953	int ret = 0;
2954
2955	/* Must have permission */
2956	if (!capable(CAP_SYS_MODULE) || modules_disabled)
2957		return -EPERM;
2958
2959	/* Do all the hard work */
2960	mod = load_module(umod, len, uargs);
2961	if (IS_ERR(mod))
2962		return PTR_ERR(mod);
2963
2964	blocking_notifier_call_chain(&module_notify_list,
2965			MODULE_STATE_COMING, mod);
2966
2967	/* Set RO and NX regions for core */
2968	set_section_ro_nx(mod->module_core,
2969				mod->core_text_size,
2970				mod->core_ro_size,
2971				mod->core_size);
2972
2973	/* Set RO and NX regions for init */
2974	set_section_ro_nx(mod->module_init,
2975				mod->init_text_size,
2976				mod->init_ro_size,
2977				mod->init_size);
2978
2979	do_mod_ctors(mod);
2980	/* Start the module */
2981	if (mod->init != NULL)
2982		ret = do_one_initcall(mod->init);
2983	if (ret < 0) {
2984		/* Init routine failed: abort.  Try to protect us from
2985                   buggy refcounters. */
2986		mod->state = MODULE_STATE_GOING;
2987		synchronize_sched();
2988		module_put(mod);
2989		blocking_notifier_call_chain(&module_notify_list,
2990					     MODULE_STATE_GOING, mod);
2991		free_module(mod);
2992		wake_up(&module_wq);
2993		return ret;
2994	}
2995	if (ret > 0) {
2996		printk(KERN_WARNING
2997"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
2998"%s: loading module anyway...\n",
2999		       __func__, mod->name, ret,
3000		       __func__);
3001		dump_stack();
3002	}
3003
3004	/* Now it's a first class citizen!  Wake up anyone waiting for it. */
3005	mod->state = MODULE_STATE_LIVE;
3006	wake_up(&module_wq);
3007	blocking_notifier_call_chain(&module_notify_list,
3008				     MODULE_STATE_LIVE, mod);
3009
3010	/* We need to finish all async code before the module init sequence is done */
3011	async_synchronize_full();
3012
3013	mutex_lock(&module_mutex);
3014	/* Drop initial reference. */
3015	module_put(mod);
3016	trim_init_extable(mod);
3017#ifdef CONFIG_KALLSYMS
3018	mod->num_symtab = mod->core_num_syms;
3019	mod->symtab = mod->core_symtab;
3020	mod->strtab = mod->core_strtab;
3021#endif
3022	unset_module_init_ro_nx(mod);
3023	module_free(mod, mod->module_init);
3024	mod->module_init = NULL;
3025	mod->init_size = 0;
3026	mod->init_ro_size = 0;
3027	mod->init_text_size = 0;
3028	mutex_unlock(&module_mutex);
3029
3030	return 0;
3031}
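/*
 * Illustrative userspace view (a sketch, error handling omitted): insmod
 * reads the whole .ko image into memory and hands it to this system call,
 * roughly:
 *
 *	int fd = open("foo.ko", O_RDONLY);
 *	struct stat st;
 *	fstat(fd, &st);
 *	void *image = malloc(st.st_size);
 *	read(fd, image, st.st_size);
 *	syscall(__NR_init_module, image, st.st_size, "debug=1");
 */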
3032
3033static inline int within(unsigned long addr, void *start, unsigned long size)
3034{
3035	return ((void *)addr >= start && (void *)addr < start + size);
3036}
3037
3038#ifdef CONFIG_KALLSYMS
3039/*
3040 * This ignores the intensely annoying "mapping symbols" found
3041 * in ARM ELF files: $a, $t and $d.
3042 */
3043static inline int is_arm_mapping_symbol(const char *str)
3044{
3045	return str[0] == '$' && strchr("atd", str[1])
3046	       && (str[2] == '\0' || str[2] == '.');
3047}
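/*
 * Examples: "$a", "$t", "$d" and dotted forms such as "$d.12" are treated
 * as mapping symbols and skipped; "$x" or "a$t" are not.
 */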
3048
3049static const char *get_ksymbol(struct module *mod,
3050			       unsigned long addr,
3051			       unsigned long *size,
3052			       unsigned long *offset)
3053{
3054	unsigned int i, best = 0;
3055	unsigned long nextval;
3056
3057	/* At worst, next value is at end of module */
3058	if (within_module_init(addr, mod))
3059		nextval = (unsigned long)mod->module_init+mod->init_text_size;
3060	else
3061		nextval = (unsigned long)mod->module_core+mod->core_text_size;
3062
3063	/* Scan for closest preceding symbol, and next symbol. (ELF
3064	   starts real symbols at 1). */
3065	for (i = 1; i < mod->num_symtab; i++) {
3066		if (mod->symtab[i].st_shndx == SHN_UNDEF)
3067			continue;
3068
3069		/* We ignore unnamed symbols: they're uninformative
3070		 * and inserted at a whim. */
3071		if (mod->symtab[i].st_value <= addr
3072		    && mod->symtab[i].st_value > mod->symtab[best].st_value
3073		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3074		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3075			best = i;
3076		if (mod->symtab[i].st_value > addr
3077		    && mod->symtab[i].st_value < nextval
3078		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3079		    && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3080			nextval = mod->symtab[i].st_value;
3081	}
3082
3083	if (!best)
3084		return NULL;
3085
3086	if (size)
3087		*size = nextval - mod->symtab[best].st_value;
3088	if (offset)
3089		*offset = addr - mod->symtab[best].st_value;
3090	return mod->strtab + mod->symtab[best].st_name;
3091}
3092
3093/* For kallsyms to ask for address resolution.  NULL means not found.  Be careful
3094 * not to take the lock (to avoid deadlock on oopses); simply disable preemption. */
3095const char *module_address_lookup(unsigned long addr,
3096			    unsigned long *size,
3097			    unsigned long *offset,
3098			    char **modname,
3099			    char *namebuf)
3100{
3101	struct module *mod;
3102	const char *ret = NULL;
3103
3104	preempt_disable();
3105	list_for_each_entry_rcu(mod, &modules, list) {
3106		if (within_module_init(addr, mod) ||
3107		    within_module_core(addr, mod)) {
3108			if (modname)
3109				*modname = mod->name;
3110			ret = get_ksymbol(mod, addr, size, offset);
3111			break;
3112		}
3113	}
3114	/* Make a copy in here where it's safe */
3115	if (ret) {
3116		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3117		ret = namebuf;
3118	}
3119	preempt_enable();
3120	return ret;
3121}
3122
3123int lookup_module_symbol_name(unsigned long addr, char *symname)
3124{
3125	struct module *mod;
3126
3127	preempt_disable();
3128	list_for_each_entry_rcu(mod, &modules, list) {
3129		if (within_module_init(addr, mod) ||
3130		    within_module_core(addr, mod)) {
3131			const char *sym;
3132
3133			sym = get_ksymbol(mod, addr, NULL, NULL);
3134			if (!sym)
3135				goto out;
3136			strlcpy(symname, sym, KSYM_NAME_LEN);
3137			preempt_enable();
3138			return 0;
3139		}
3140	}
3141out:
3142	preempt_enable();
3143	return -ERANGE;
3144}
3145
3146int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3147			unsigned long *offset, char *modname, char *name)
3148{
3149	struct module *mod;
3150
3151	preempt_disable();
3152	list_for_each_entry_rcu(mod, &modules, list) {
3153		if (within_module_init(addr, mod) ||
3154		    within_module_core(addr, mod)) {
3155			const char *sym;
3156
3157			sym = get_ksymbol(mod, addr, size, offset);
3158			if (!sym)
3159				goto out;
3160			if (modname)
3161				strlcpy(modname, mod->name, MODULE_NAME_LEN);
3162			if (name)
3163				strlcpy(name, sym, KSYM_NAME_LEN);
3164			preempt_enable();
3165			return 0;
3166		}
3167	}
3168out:
3169	preempt_enable();
3170	return -ERANGE;
3171}
3172
3173int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3174			char *name, char *module_name, int *exported)
3175{
3176	struct module *mod;
3177
3178	preempt_disable();
3179	list_for_each_entry_rcu(mod, &modules, list) {
3180		if (symnum < mod->num_symtab) {
3181			*value = mod->symtab[symnum].st_value;
3182			*type = mod->symtab[symnum].st_info;
3183			strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3184				KSYM_NAME_LEN);
3185			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3186			*exported = is_exported(name, *value, mod);
3187			preempt_enable();
3188			return 0;
3189		}
3190		symnum -= mod->num_symtab;
3191	}
3192	preempt_enable();
3193	return -ERANGE;
3194}
3195
3196static unsigned long mod_find_symname(struct module *mod, const char *name)
3197{
3198	unsigned int i;
3199
3200	for (i = 0; i < mod->num_symtab; i++)
3201		if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3202		    mod->symtab[i].st_info != 'U')
3203			return mod->symtab[i].st_value;
3204	return 0;
3205}
3206
3207/* Look for this name: can be of form module:name. */
3208unsigned long module_kallsyms_lookup_name(const char *name)
3209{
3210	struct module *mod;
3211	char *colon;
3212	unsigned long ret = 0;
3213
3214	/* Don't lock: we're in enough trouble already. */
3215	preempt_disable();
3216	if ((colon = strchr(name, ':')) != NULL) {
3217		*colon = '\0';
3218		if ((mod = find_module(name)) != NULL)
3219			ret = mod_find_symname(mod, colon+1);
3220		*colon = ':';
3221	} else {
3222		list_for_each_entry_rcu(mod, &modules, list)
3223			if ((ret = mod_find_symname(mod, name)) != 0)
3224				break;
3225	}
3226	preempt_enable();
3227	return ret;
3228}
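/*
 * Example (illustrative): "usbcore:usb_register_driver" restricts the
 * lookup to the usbcore module, while a bare "usb_register_driver" scans
 * every loaded module until a match is found.
 */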
3229
3230int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3231					     struct module *, unsigned long),
3232				   void *data)
3233{
3234	struct module *mod;
3235	unsigned int i;
3236	int ret;
3237
3238	list_for_each_entry(mod, &modules, list) {
3239		for (i = 0; i < mod->num_symtab; i++) {
3240			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3241				 mod, mod->symtab[i].st_value);
3242			if (ret != 0)
3243				return ret;
3244		}
3245	}
3246	return 0;
3247}
3248#endif /* CONFIG_KALLSYMS */
3249
3250static char *module_flags(struct module *mod, char *buf)
3251{
3252	int bx = 0;
3253
3254	if (mod->taints ||
3255	    mod->state == MODULE_STATE_GOING ||
3256	    mod->state == MODULE_STATE_COMING) {
3257		buf[bx++] = '(';
3258		if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
3259			buf[bx++] = 'P';
3260		if (mod->taints & (1 << TAINT_FORCED_MODULE))
3261			buf[bx++] = 'F';
3262		if (mod->taints & (1 << TAINT_CRAP))
3263			buf[bx++] = 'C';
3264		/*
3265		 * TAINT_FORCED_RMMOD: could be added.
3266		 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
3267		 * apply to modules.
3268		 */
3269
3270		/* Show a - for module-is-being-unloaded */
3271		if (mod->state == MODULE_STATE_GOING)
3272			buf[bx++] = '-';
3273		/* Show a + for module-is-being-loaded */
3274		if (mod->state == MODULE_STATE_COMING)
3275			buf[bx++] = '+';
3276		buf[bx++] = ')';
3277	}
3278	buf[bx] = '\0';
3279
3280	return buf;
3281}
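/*
 * Example (illustrative): a force-loaded proprietary module that is in
 * the middle of being unloaded yields the flag string "(PF-)"; a live,
 * untainted module yields the empty string.
 */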
3282
3283#ifdef CONFIG_PROC_FS
3284/* Called by the /proc file system to return a list of modules. */
3285static void *m_start(struct seq_file *m, loff_t *pos)
3286{
3287	mutex_lock(&module_mutex);
3288	return seq_list_start(&modules, *pos);
3289}
3290
3291static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3292{
3293	return seq_list_next(p, &modules, pos);
3294}
3295
3296static void m_stop(struct seq_file *m, void *p)
3297{
3298	mutex_unlock(&module_mutex);
3299}
3300
3301static int m_show(struct seq_file *m, void *p)
3302{
3303	struct module *mod = list_entry(p, struct module, list);
3304	char buf[8];
3305
3306	seq_printf(m, "%s %u",
3307		   mod->name, mod->init_size + mod->core_size);
3308	print_unload_info(m, mod);
3309
3310	/* Informative for users. */
3311	seq_printf(m, " %s",
3312		   mod->state == MODULE_STATE_GOING ? "Unloading":
3313		   mod->state == MODULE_STATE_COMING ? "Loading":
3314		   "Live");
3315	/* Used by oprofile and other similar tools. */
3316	seq_printf(m, " 0x%pK", mod->module_core);
3317
3318	/* Taints info */
3319	if (mod->taints)
3320		seq_printf(m, " %s", module_flags(mod, buf));
3321
3322	seq_printf(m, "\n");
3323	return 0;
3324}
3325
3326/* Format: modulename size refcount deps address
3327
3328   Where refcount is a number or -, and deps is a comma-separated list
3329   of depends or -.
3330*/
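/* Illustrative line (hypothetical values):

	e1000e 77312 0 - Live 0xffffffffa0000000

   Note the core address is printed with %pK, so unprivileged readers may
   see zeroes depending on kptr_restrict.
*/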
3331static const struct seq_operations modules_op = {
3332	.start	= m_start,
3333	.next	= m_next,
3334	.stop	= m_stop,
3335	.show	= m_show
3336};
3337
3338static int modules_open(struct inode *inode, struct file *file)
3339{
3340	return seq_open(file, &modules_op);
3341}
3342
3343static const struct file_operations proc_modules_operations = {
3344	.open		= modules_open,
3345	.read		= seq_read,
3346	.llseek		= seq_lseek,
3347	.release	= seq_release,
3348};
3349
3350static int __init proc_modules_init(void)
3351{
3352	proc_create("modules", 0, NULL, &proc_modules_operations);
3353	return 0;
3354}
3355module_init(proc_modules_init);
3356#endif
3357
3358/* Given an address, look for it in the module exception tables. */
3359const struct exception_table_entry *search_module_extables(unsigned long addr)
3360{
3361	const struct exception_table_entry *e = NULL;
3362	struct module *mod;
3363
3364	preempt_disable();
3365	list_for_each_entry_rcu(mod, &modules, list) {
3366		if (mod->num_exentries == 0)
3367			continue;
3368
3369		e = search_extable(mod->extable,
3370				   mod->extable + mod->num_exentries - 1,
3371				   addr);
3372		if (e)
3373			break;
3374	}
3375	preempt_enable();
3376
3377	/* If we found one, we are currently running inside it, so the
3378	   module cannot be unloaded and no refcount is needed. */
3379	return e;
3380}
3381
3382/*
3383 * is_module_address - is this address inside a module?
3384 * @addr: the address to check.
3385 *
3386 * See is_module_text_address() if you simply want to see if the address
3387 * is code (not data).
3388 */
3389bool is_module_address(unsigned long addr)
3390{
3391	bool ret;
3392
3393	preempt_disable();
3394	ret = __module_address(addr) != NULL;
3395	preempt_enable();
3396
3397	return ret;
3398}
3399
3400/*
3401 * __module_address - get the module which contains an address.
3402 * @addr: the address.
3403 *
3404 * Must be called with preempt disabled or module mutex held so that
3405 * module doesn't get freed during this.
3406 */
3407struct module *__module_address(unsigned long addr)
3408{
3409	struct module *mod;
3410
3411	if (addr < module_addr_min || addr > module_addr_max)
3412		return NULL;
3413
3414	list_for_each_entry_rcu(mod, &modules, list)
3415		if (within_module_core(addr, mod)
3416		    || within_module_init(addr, mod))
3417			return mod;
3418	return NULL;
3419}
3420EXPORT_SYMBOL_GPL(__module_address);
3421
3422/*
3423 * is_module_text_address - is this address inside module code?
3424 * @addr: the address to check.
3425 *
3426 * See is_module_address() if you simply want to see if the address is
3427 * anywhere in a module.  See kernel_text_address() for testing if an
3428 * address corresponds to kernel or module code.
3429 */
3430bool is_module_text_address(unsigned long addr)
3431{
3432	bool ret;
3433
3434	preempt_disable();
3435	ret = __module_text_address(addr) != NULL;
3436	preempt_enable();
3437
3438	return ret;
3439}
3440
3441/*
3442 * __module_text_address - get the module whose code contains an address.
3443 * @addr: the address.
3444 *
3445 * Must be called with preempt disabled or module mutex held so that
3446 * module doesn't get freed during this.
3447 */
3448struct module *__module_text_address(unsigned long addr)
3449{
3450	struct module *mod = __module_address(addr);
3451	if (mod) {
3452		/* Make sure it's within the text section. */
3453		if (!within(addr, mod->module_init, mod->init_text_size)
3454		    && !within(addr, mod->module_core, mod->core_text_size))
3455			mod = NULL;
3456	}
3457	return mod;
3458}
3459EXPORT_SYMBOL_GPL(__module_text_address);
3460
3461/* Don't grab lock, we're oopsing. */
3462void print_modules(void)
3463{
3464	struct module *mod;
3465	char buf[8];
3466
3467	printk(KERN_DEFAULT "Modules linked in:");
3468	/* Most callers should already have preempt disabled, but make sure */
3469	preempt_disable();
3470	list_for_each_entry_rcu(mod, &modules, list)
3471		printk(" %s%s", mod->name, module_flags(mod, buf));
3472	preempt_enable();
3473	if (last_unloaded_module[0])
3474		printk(" [last unloaded: %s]", last_unloaded_module);
3475	printk("\n");
3476}
3477
3478#ifdef CONFIG_MODVERSIONS
3479/* Generate the signature for all relevant module structures here.
3480 * If these change, we don't want to try to parse the module. */
3481void module_layout(struct module *mod,
3482		   struct modversion_info *ver,
3483		   struct kernel_param *kp,
3484		   struct kernel_symbol *ks,
3485		   struct tracepoint * const *tp)
3486{
3487}
3488EXPORT_SYMBOL(module_layout);
3489#endif
3490
3491#ifdef CONFIG_TRACEPOINTS
3492void module_update_tracepoints(void)
3493{
3494	struct module *mod;
3495
3496	mutex_lock(&module_mutex);
3497	list_for_each_entry(mod, &modules, list)
3498		if (!mod->taints)
3499			tracepoint_update_probe_range(mod->tracepoints_ptrs,
3500				mod->tracepoints_ptrs + mod->num_tracepoints);
3501	mutex_unlock(&module_mutex);
3502}
3503
3504/*
3505 * Returns 0 if current not found.
3506 * Returns 1 if current found.
3507 */
3508int module_get_iter_tracepoints(struct tracepoint_iter *iter)
3509{
3510	struct module *iter_mod;
3511	int found = 0;
3512
3513	mutex_lock(&module_mutex);
3514	list_for_each_entry(iter_mod, &modules, list) {
3515		if (!iter_mod->taints) {
3516			/*
3517			 * Sorted module list
3518			 */
3519			if (iter_mod < iter->module)
3520				continue;
3521			else if (iter_mod > iter->module)
3522				iter->tracepoint = NULL;
3523			found = tracepoint_get_iter_range(&iter->tracepoint,
3524				iter_mod->tracepoints_ptrs,
3525				iter_mod->tracepoints_ptrs
3526					+ iter_mod->num_tracepoints);
3527			if (found) {
3528				iter->module = iter_mod;
3529				break;
3530			}
3531		}
3532	}
3533	mutex_unlock(&module_mutex);
3534	return found;
3535}
3536#endif
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3   Copyright (C) 2002 Richard Henderson
   4   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
   5
 
 
 
 
 
 
 
 
 
 
 
 
 
   6*/
   7
   8#define INCLUDE_VERMAGIC
   9
  10#include <linux/export.h>
  11#include <linux/extable.h>
  12#include <linux/moduleloader.h>
  13#include <linux/module_signature.h>
  14#include <linux/trace_events.h>
  15#include <linux/init.h>
  16#include <linux/kallsyms.h>
  17#include <linux/file.h>
  18#include <linux/fs.h>
  19#include <linux/sysfs.h>
  20#include <linux/kernel.h>
  21#include <linux/slab.h>
  22#include <linux/vmalloc.h>
  23#include <linux/elf.h>
  24#include <linux/proc_fs.h>
  25#include <linux/security.h>
  26#include <linux/seq_file.h>
  27#include <linux/syscalls.h>
  28#include <linux/fcntl.h>
  29#include <linux/rcupdate.h>
  30#include <linux/capability.h>
  31#include <linux/cpu.h>
  32#include <linux/moduleparam.h>
  33#include <linux/errno.h>
  34#include <linux/err.h>
  35#include <linux/vermagic.h>
  36#include <linux/notifier.h>
  37#include <linux/sched.h>
 
  38#include <linux/device.h>
  39#include <linux/string.h>
  40#include <linux/mutex.h>
  41#include <linux/rculist.h>
  42#include <linux/uaccess.h>
  43#include <asm/cacheflush.h>
  44#include <linux/set_memory.h>
  45#include <asm/mmu_context.h>
  46#include <linux/license.h>
  47#include <asm/sections.h>
  48#include <linux/tracepoint.h>
  49#include <linux/ftrace.h>
  50#include <linux/livepatch.h>
  51#include <linux/async.h>
  52#include <linux/percpu.h>
  53#include <linux/kmemleak.h>
  54#include <linux/jump_label.h>
  55#include <linux/pfn.h>
  56#include <linux/bsearch.h>
  57#include <linux/dynamic_debug.h>
  58#include <linux/audit.h>
  59#include <uapi/linux/module.h>
  60#include "module-internal.h"
  61
  62#define CREATE_TRACE_POINTS
  63#include <trace/events/module.h>
  64
 
 
 
 
 
 
  65#ifndef ARCH_SHF_SMALL
  66#define ARCH_SHF_SMALL 0
  67#endif
  68
  69/*
  70 * Modules' sections will be aligned on page boundaries
  71 * to ensure complete separation of code and data, but
  72 * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
  73 */
  74#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
  75# define debug_align(X) ALIGN(X, PAGE_SIZE)
  76#else
  77# define debug_align(X) (X)
  78#endif
  79
 
 
 
 
 
 
 
 
 
  80/* If this is set, the section belongs in the init part of the module */
  81#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
  82
  83/*
  84 * Mutex protects:
  85 * 1) List of modules (also safely readable with preempt_disable),
  86 * 2) module_use links,
  87 * 3) module_addr_min/module_addr_max.
  88 * (delete and add uses RCU list operations). */
  89DEFINE_MUTEX(module_mutex);
  90EXPORT_SYMBOL_GPL(module_mutex);
  91static LIST_HEAD(modules);
  92
  93/* Work queue for freeing init sections in success case */
  94static struct work_struct init_free_wq;
  95static struct llist_head init_free_list;
  96
  97#ifdef CONFIG_MODULES_TREE_LOOKUP
  98
  99/*
 100 * Use a latched RB-tree for __module_address(); this allows us to use
 101 * RCU-sched lookups of the address from any context.
 102 *
 103 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 104 * __module_address() hard by doing a lot of stack unwinding; potentially from
 105 * NMI context.
 106 */
 107
 108static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
 109{
 110	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
 111
 112	return (unsigned long)layout->base;
 113}
 114
 115static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
 116{
 117	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
 118
 119	return (unsigned long)layout->size;
 120}
 121
 122static __always_inline bool
 123mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
 124{
 125	return __mod_tree_val(a) < __mod_tree_val(b);
 126}
 127
 128static __always_inline int
 129mod_tree_comp(void *key, struct latch_tree_node *n)
 130{
 131	unsigned long val = (unsigned long)key;
 132	unsigned long start, end;
 133
 134	start = __mod_tree_val(n);
 135	if (val < start)
 136		return -1;
 137
 138	end = start + __mod_tree_size(n);
 139	if (val >= end)
 140		return 1;
 141
 142	return 0;
 143}
 144
 145static const struct latch_tree_ops mod_tree_ops = {
 146	.less = mod_tree_less,
 147	.comp = mod_tree_comp,
 148};
 149
 150static struct mod_tree_root {
 151	struct latch_tree_root root;
 152	unsigned long addr_min;
 153	unsigned long addr_max;
 154} mod_tree __cacheline_aligned = {
 155	.addr_min = -1UL,
 156};
 157
 158#define module_addr_min mod_tree.addr_min
 159#define module_addr_max mod_tree.addr_max
 160
 161static noinline void __mod_tree_insert(struct mod_tree_node *node)
 162{
 163	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
 164}
 165
 166static void __mod_tree_remove(struct mod_tree_node *node)
 167{
 168	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
 169}
 170
 171/*
 172 * These modifications (insert, remove_init and remove) are serialized
 173 * by the module_mutex.
 174 */
 175static void mod_tree_insert(struct module *mod)
 176{
 177	mod->core_layout.mtn.mod = mod;
 178	mod->init_layout.mtn.mod = mod;
 179
 180	__mod_tree_insert(&mod->core_layout.mtn);
 181	if (mod->init_layout.size)
 182		__mod_tree_insert(&mod->init_layout.mtn);
 183}
 184
 185static void mod_tree_remove_init(struct module *mod)
 186{
 187	if (mod->init_layout.size)
 188		__mod_tree_remove(&mod->init_layout.mtn);
 189}
 190
 191static void mod_tree_remove(struct module *mod)
 192{
 193	__mod_tree_remove(&mod->core_layout.mtn);
 194	mod_tree_remove_init(mod);
 195}
 196
 197static struct module *mod_find(unsigned long addr)
 198{
 199	struct latch_tree_node *ltn;
 200
 201	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
 202	if (!ltn)
 203		return NULL;
 204
 205	return container_of(ltn, struct mod_tree_node, node)->mod;
 206}
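
/*
 * Illustrative only: a lookup mirroring what __module_address() does with
 * mod_find().  The caller must be in an RCU-sched read-side section (or
 * hold module_mutex) for the returned module to remain valid:
 *
 *	preempt_disable();
 *	mod = mod_find(addr);
 *	if (mod && mod->state == MODULE_STATE_UNFORMED)
 *		mod = NULL;
 *	preempt_enable();
 */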
 207
 208#else /* MODULES_TREE_LOOKUP */
 209
 210static unsigned long module_addr_min = -1UL, module_addr_max = 0;
 211
 212static void mod_tree_insert(struct module *mod) { }
 213static void mod_tree_remove_init(struct module *mod) { }
 214static void mod_tree_remove(struct module *mod) { }
 215
 216static struct module *mod_find(unsigned long addr)
 217{
 218	struct module *mod;
 219
 220	list_for_each_entry_rcu(mod, &modules, list,
 221				lockdep_is_held(&module_mutex)) {
 222		if (within_module(addr, mod))
 223			return mod;
 224	}
 225
 226	return NULL;
 227}
 228
 229#endif /* MODULES_TREE_LOOKUP */
 230
 231/*
 232 * Bounds of the module allocations, for speeding up __module_address.
 233 * Protected by module_mutex.
 234 */
 235static void __mod_update_bounds(void *base, unsigned int size)
 236{
 237	unsigned long min = (unsigned long)base;
 238	unsigned long max = min + size;
 239
 240	if (min < module_addr_min)
 241		module_addr_min = min;
 242	if (max > module_addr_max)
 243		module_addr_max = max;
 244}
 245
 246static void mod_update_bounds(struct module *mod)
 247{
 248	__mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
 249	if (mod->init_layout.size)
 250		__mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
 251}
 252
 253#ifdef CONFIG_KGDB_KDB
 254struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 255#endif /* CONFIG_KGDB_KDB */
 256
 257static void module_assert_mutex(void)
 258{
 259	lockdep_assert_held(&module_mutex);
 260}
 261
 262static void module_assert_mutex_or_preempt(void)
 263{
 264#ifdef CONFIG_LOCKDEP
 265	if (unlikely(!debug_locks))
 266		return;
 267
 268	WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
 269		!lockdep_is_held(&module_mutex));
 270#endif
 271}
 272
 273static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
 274module_param(sig_enforce, bool_enable_only, 0644);
 275
 276/*
 277 * Export the sig_enforce kernel cmdline parameter so that other subsystems
 278 * can rely on it instead of poking at CONFIG_MODULE_SIG_FORCE directly.
 279 */
 280bool is_module_sig_enforced(void)
 281{
 282	return sig_enforce;
 283}
 284EXPORT_SYMBOL(is_module_sig_enforced);
 285
 286void set_module_sig_enforced(void)
 287{
 288	sig_enforce = true;
 289}
 290
 291/* Block module loading/unloading? */
 292int modules_disabled = 0;
 293core_param(nomodule, modules_disabled, bint, 0);
 294
 295/* Waiting for a module to finish initializing? */
 296static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 297
 298static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 299
 300int register_module_notifier(struct notifier_block *nb)
 301{
 302	return blocking_notifier_chain_register(&module_notify_list, nb);
 303}
 304EXPORT_SYMBOL(register_module_notifier);
 305
 306int unregister_module_notifier(struct notifier_block *nb)
 307{
 308	return blocking_notifier_chain_unregister(&module_notify_list, nb);
 309}
 310EXPORT_SYMBOL(unregister_module_notifier);
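
/*
 * Sketch of a typical notifier user (the names are made up, but the shape
 * matches in-tree users of this chain):
 *
 *	static int my_module_cb(struct notifier_block *nb,
 *				unsigned long action, void *data)
 *	{
 *		struct module *mod = data;
 *
 *		if (action == MODULE_STATE_COMING)
 *			pr_info("%s is coming\n", mod->name);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_module_cb,
 *	};
 *	...
 *	register_module_notifier(&my_nb);
 */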
 311
 312/*
 313 * We require a truly strong try_module_get(): 0 means success.
 314 * Otherwise an error is returned, because initialization is either
 315 * still in progress or has failed.
 316 */
 317static inline int strong_try_module_get(struct module *mod)
 318{
 319	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
 320	if (mod && mod->state == MODULE_STATE_COMING)
 321		return -EBUSY;
 322	if (try_module_get(mod))
 323		return 0;
 324	else
 325		return -ENOENT;
 326}
 327
 328static inline void add_taint_module(struct module *mod, unsigned flag,
 329				    enum lockdep_ok lockdep_ok)
 330{
 331	add_taint(flag, lockdep_ok);
 332	set_bit(flag, &mod->taints);
 333}
 334
 335/*
 336 * A thread that wants to hold a reference to a module only while it
 337 * is running can call this to safely exit.  nfsd and lockd use this.
 338 */
 339void __noreturn __module_put_and_exit(struct module *mod, long code)
 340{
 341	module_put(mod);
 342	do_exit(code);
 343}
 344EXPORT_SYMBOL(__module_put_and_exit);
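
/*
 * Illustrative kthread pattern, via the module_put_and_exit() wrapper in
 * <linux/module.h>, which supplies THIS_MODULE and never returns:
 *
 *	static int my_daemon(void *unused)
 *	{
 *		...
 *		module_put_and_exit(0);
 *	}
 */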
 345
 346/* Find a module section: 0 means not found. */
 347static unsigned int find_sec(const struct load_info *info, const char *name)
 348{
 349	unsigned int i;
 350
 351	for (i = 1; i < info->hdr->e_shnum; i++) {
 352		Elf_Shdr *shdr = &info->sechdrs[i];
 353		/* Alloc bit cleared means "ignore it." */
 354		if ((shdr->sh_flags & SHF_ALLOC)
 355		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
 356			return i;
 357	}
 358	return 0;
 359}
 360
 361/* Find a module section, or NULL. */
 362static void *section_addr(const struct load_info *info, const char *name)
 363{
 364	/* Section 0 has sh_addr 0. */
 365	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
 366}
 367
 368/* Find a module section, or NULL.  Fill in number of "objects" in section. */
 369static void *section_objs(const struct load_info *info,
 370			  const char *name,
 371			  size_t object_size,
 372			  unsigned int *num)
 373{
 374	unsigned int sec = find_sec(info, name);
 375
 376	/* Section 0 has sh_addr 0 and sh_size 0. */
 377	*num = info->sechdrs[sec].sh_size / object_size;
 378	return (void *)info->sechdrs[sec].sh_addr;
 379}
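
/*
 * Example (sketch, mirroring how the loader collects module parameters
 * further down in this file):
 *
 *	mod->kp = section_objs(info, "__param",
 *			       sizeof(*mod->kp), &mod->num_kp);
 */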
 380
 381/* Provided by the linker */
 382extern const struct kernel_symbol __start___ksymtab[];
 383extern const struct kernel_symbol __stop___ksymtab[];
 384extern const struct kernel_symbol __start___ksymtab_gpl[];
 385extern const struct kernel_symbol __stop___ksymtab_gpl[];
 386extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 387extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
 388extern const s32 __start___kcrctab[];
 389extern const s32 __start___kcrctab_gpl[];
 390extern const s32 __start___kcrctab_gpl_future[];
 391#ifdef CONFIG_UNUSED_SYMBOLS
 392extern const struct kernel_symbol __start___ksymtab_unused[];
 393extern const struct kernel_symbol __stop___ksymtab_unused[];
 394extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
 395extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
 396extern const s32 __start___kcrctab_unused[];
 397extern const s32 __start___kcrctab_unused_gpl[];
 398#endif
 399
 400#ifndef CONFIG_MODVERSIONS
 401#define symversion(base, idx) NULL
 402#else
 403#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
 404#endif
 405
 406static bool each_symbol_in_section(const struct symsearch *arr,
 407				   unsigned int arrsize,
 408				   struct module *owner,
 409				   bool (*fn)(const struct symsearch *syms,
 410					      struct module *owner,
 411					      void *data),
 412				   void *data)
 413{
 414	unsigned int j;
 415
 416	for (j = 0; j < arrsize; j++) {
 417		if (fn(&arr[j], owner, data))
 418			return true;
 419	}
 420
 421	return false;
 422}
 423
 424/* Returns true as soon as fn returns true, otherwise false. */
 425static bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 426				    struct module *owner,
 427				    void *data),
 428			 void *data)
 429{
 430	struct module *mod;
 431	static const struct symsearch arr[] = {
 432		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
 433		  NOT_GPL_ONLY, false },
 434		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
 435		  __start___kcrctab_gpl,
 436		  GPL_ONLY, false },
 437		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
 438		  __start___kcrctab_gpl_future,
 439		  WILL_BE_GPL_ONLY, false },
 440#ifdef CONFIG_UNUSED_SYMBOLS
 441		{ __start___ksymtab_unused, __stop___ksymtab_unused,
 442		  __start___kcrctab_unused,
 443		  NOT_GPL_ONLY, true },
 444		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
 445		  __start___kcrctab_unused_gpl,
 446		  GPL_ONLY, true },
 447#endif
 448	};
 449
 450	module_assert_mutex_or_preempt();
 451
 452	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
 453		return true;
 454
 455	list_for_each_entry_rcu(mod, &modules, list,
 456				lockdep_is_held(&module_mutex)) {
 457		struct symsearch arr[] = {
 458			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
 459			  NOT_GPL_ONLY, false },
 460			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
 461			  mod->gpl_crcs,
 462			  GPL_ONLY, false },
 463			{ mod->gpl_future_syms,
 464			  mod->gpl_future_syms + mod->num_gpl_future_syms,
 465			  mod->gpl_future_crcs,
 466			  WILL_BE_GPL_ONLY, false },
 467#ifdef CONFIG_UNUSED_SYMBOLS
 468			{ mod->unused_syms,
 469			  mod->unused_syms + mod->num_unused_syms,
 470			  mod->unused_crcs,
 471			  NOT_GPL_ONLY, true },
 472			{ mod->unused_gpl_syms,
 473			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
 474			  mod->unused_gpl_crcs,
 475			  GPL_ONLY, true },
 476#endif
 477		};
 478
 479		if (mod->state == MODULE_STATE_UNFORMED)
 480			continue;
 481
 482		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
 483			return true;
 484	}
 485	return false;
 486}
 487
 488struct find_symbol_arg {
 489	/* Input */
 490	const char *name;
 491	bool gplok;
 492	bool warn;
 493
 494	/* Output */
 495	struct module *owner;
 496	const s32 *crc;
 497	const struct kernel_symbol *sym;
 498	enum mod_license license;
 499};
 500
 501static bool check_exported_symbol(const struct symsearch *syms,
 502				  struct module *owner,
 503				  unsigned int symnum, void *data)
 504{
 505	struct find_symbol_arg *fsa = data;
 506
 507	if (!fsa->gplok) {
 508		if (syms->license == GPL_ONLY)
 509			return false;
 510		if (syms->license == WILL_BE_GPL_ONLY && fsa->warn) {
 511			pr_warn("Symbol %s is being used by a non-GPL module, "
 512				"which will not be allowed in the future\n",
 513				fsa->name);
 514		}
 515	}
 516
 517#ifdef CONFIG_UNUSED_SYMBOLS
 518	if (syms->unused && fsa->warn) {
 519		pr_warn("Symbol %s is marked as UNUSED, however this module is "
 520			"using it.\n", fsa->name);
 521		pr_warn("This symbol will go away in the future.\n");
 522		pr_warn("Please evaluate if this is the right api to use and "
 523			"if it really is, submit a report to the linux kernel "
 524			"mailing list together with submitting your code for "
 525			"inclusion.\n");
 526	}
 527#endif
 528
 529	fsa->owner = owner;
 530	fsa->crc = symversion(syms->crcs, symnum);
 531	fsa->sym = &syms->start[symnum];
 532	fsa->license = syms->license;
 533	return true;
 534}
 535
 536static unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
 537{
 538#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
 539	return (unsigned long)offset_to_ptr(&sym->value_offset);
 540#else
 541	return sym->value;
 542#endif
 543}
 544
 545static const char *kernel_symbol_name(const struct kernel_symbol *sym)
 546{
 547#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
 548	return offset_to_ptr(&sym->name_offset);
 549#else
 550	return sym->name;
 551#endif
 552}
 553
 554static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
 555{
 556#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
 557	if (!sym->namespace_offset)
 558		return NULL;
 559	return offset_to_ptr(&sym->namespace_offset);
 560#else
 561	return sym->namespace;
 562#endif
 563}
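
/*
 * With CONFIG_HAVE_ARCH_PREL32_RELOCATIONS the export tables store 32-bit
 * place-relative offsets instead of absolute pointers (smaller tables and
 * no relocations to process at load time).  offset_to_ptr() recovers the
 * pointer; conceptually (sketch of the <linux/compiler.h> helper):
 *
 *	return (void *)((unsigned long)off + *off);
 */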
 564
 565static int cmp_name(const void *name, const void *sym)
 566{
 567	return strcmp(name, kernel_symbol_name(sym));
 568}
 569
 570static bool find_exported_symbol_in_section(const struct symsearch *syms,
 571					    struct module *owner,
 572					    void *data)
 573{
 574	struct find_symbol_arg *fsa = data;
 575	struct kernel_symbol *sym;
 576
 577	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
 578			sizeof(struct kernel_symbol), cmp_name);
 579
 580	if (sym != NULL && check_exported_symbol(syms, owner,
 581						 sym - syms->start, data))
 582		return true;
 583
 584	return false;
 585}
 586
 587/* Find an exported symbol and return it, along with the (optional) crc and
 588 * the (optional) module which owns it.  Needs preempt disabled or module_mutex. */
 589static const struct kernel_symbol *find_symbol(const char *name,
 590					struct module **owner,
 591					const s32 **crc,
 592					enum mod_license *license,
 593					bool gplok,
 594					bool warn)
 595{
 596	struct find_symbol_arg fsa;
 597
 598	fsa.name = name;
 599	fsa.gplok = gplok;
 600	fsa.warn = warn;
 601
 602	if (each_symbol_section(find_exported_symbol_in_section, &fsa)) {
 603		if (owner)
 604			*owner = fsa.owner;
 605		if (crc)
 606			*crc = fsa.crc;
 607		if (license)
 608			*license = fsa.license;
 609		return fsa.sym;
 610	}
 611
 612	pr_debug("Failed to find symbol %s\n", name);
 613	return NULL;
 614}
 615
 616/*
 617 * Search for module by name: must hold module_mutex (or preempt disabled
 618 * for read-only access).
 619 */
 620static struct module *find_module_all(const char *name, size_t len,
 621				      bool even_unformed)
 622{
 623	struct module *mod;
 624
 625	module_assert_mutex_or_preempt();
 626
 627	list_for_each_entry_rcu(mod, &modules, list,
 628				lockdep_is_held(&module_mutex)) {
 629		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
 630			continue;
 631		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
 632			return mod;
 633	}
 634	return NULL;
 635}
 636
 637struct module *find_module(const char *name)
 638{
 639	module_assert_mutex();
 640	return find_module_all(name, strlen(name), false);
 641}
 642EXPORT_SYMBOL_GPL(find_module);
 643
 644#ifdef CONFIG_SMP
 645
 646static inline void __percpu *mod_percpu(struct module *mod)
 647{
 648	return mod->percpu;
 649}
 650
 651static int percpu_modalloc(struct module *mod, struct load_info *info)
 652{
 653	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
 654	unsigned long align = pcpusec->sh_addralign;
 655
 656	if (!pcpusec->sh_size)
 657		return 0;
 658
 659	if (align > PAGE_SIZE) {
 660		pr_warn("%s: per-cpu alignment %li > %li\n",
 661			mod->name, align, PAGE_SIZE);
 662		align = PAGE_SIZE;
 663	}
 664
 665	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
 666	if (!mod->percpu) {
 667		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
 668			mod->name, (unsigned long)pcpusec->sh_size);
 669		return -ENOMEM;
 670	}
 671	mod->percpu_size = pcpusec->sh_size;
 672	return 0;
 673}
 674
 675static void percpu_modfree(struct module *mod)
 676{
 677	free_percpu(mod->percpu);
 678}
 679
 680static unsigned int find_pcpusec(struct load_info *info)
 681{
 682	return find_sec(info, ".data..percpu");
 683}
 684
 685static void percpu_modcopy(struct module *mod,
 686			   const void *from, unsigned long size)
 687{
 688	int cpu;
 689
 690	for_each_possible_cpu(cpu)
 691		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
 692}
 693
 694bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 695{
 696	struct module *mod;
 697	unsigned int cpu;
 698
 699	preempt_disable();
 700
 701	list_for_each_entry_rcu(mod, &modules, list) {
 702		if (mod->state == MODULE_STATE_UNFORMED)
 703			continue;
 704		if (!mod->percpu_size)
 705			continue;
 706		for_each_possible_cpu(cpu) {
 707			void *start = per_cpu_ptr(mod->percpu, cpu);
 708			void *va = (void *)addr;
 709
 710			if (va >= start && va < start + mod->percpu_size) {
 711				if (can_addr) {
 712					*can_addr = (unsigned long) (va - start);
 713					*can_addr += (unsigned long)
 714						per_cpu_ptr(mod->percpu,
 715							    get_boot_cpu_id());
 716				}
 717				preempt_enable();
 718				return true;
 719			}
 720		}
 721	}
 722
 723	preempt_enable();
 724	return false;
 725}
 726
 727/**
 728 * is_module_percpu_address - test whether address is from module static percpu
 729 * @addr: address to test
 730 *
 731 * Test whether @addr belongs to module static percpu area.
 732 *
 733 * RETURNS:
 734 * %true if @addr is from module static percpu area
 735 */
 736bool is_module_percpu_address(unsigned long addr)
 737{
 738	return __is_module_percpu_address(addr, NULL);
 739}
 740
 741#else /* ... !CONFIG_SMP */
 742
 743static inline void __percpu *mod_percpu(struct module *mod)
 744{
 745	return NULL;
 746}
 747static int percpu_modalloc(struct module *mod, struct load_info *info)
 748{
 749	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
 750	if (info->sechdrs[info->index.pcpu].sh_size != 0)
 751		return -ENOMEM;
 752	return 0;
 753}
 754static inline void percpu_modfree(struct module *mod)
 755{
 756}
 757static unsigned int find_pcpusec(struct load_info *info)
 758{
 759	return 0;
 760}
 761static inline void percpu_modcopy(struct module *mod,
 762				  const void *from, unsigned long size)
 763{
 764	/* pcpusec should be 0, and size of that section should be 0. */
 765	BUG_ON(size != 0);
 766}
 767bool is_module_percpu_address(unsigned long addr)
 768{
 769	return false;
 770}
 771
 772bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 773{
 774	return false;
 775}
 776
 777#endif /* CONFIG_SMP */
 778
 779#define MODINFO_ATTR(field)	\
 780static void setup_modinfo_##field(struct module *mod, const char *s)  \
 781{                                                                     \
 782	mod->field = kstrdup(s, GFP_KERNEL);                          \
 783}                                                                     \
 784static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
 785			struct module_kobject *mk, char *buffer)      \
 786{                                                                     \
 787	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
 788}                                                                     \
 789static int modinfo_##field##_exists(struct module *mod)               \
 790{                                                                     \
 791	return mod->field != NULL;                                    \
 792}                                                                     \
 793static void free_modinfo_##field(struct module *mod)                  \
 794{                                                                     \
 795	kfree(mod->field);                                            \
 796	mod->field = NULL;                                            \
 797}                                                                     \
 798static struct module_attribute modinfo_##field = {                    \
 799	.attr = { .name = __stringify(field), .mode = 0444 },         \
 800	.show = show_modinfo_##field,                                 \
 801	.setup = setup_modinfo_##field,                               \
 802	.test = modinfo_##field##_exists,                             \
 803	.free = free_modinfo_##field,                                 \
 804};
 805
 806MODINFO_ATTR(version);
 807MODINFO_ATTR(srcversion);
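
/*
 * For reference, MODINFO_ATTR(version) above expands (roughly) to
 * setup_modinfo_version(), show_modinfo_version(),
 * modinfo_version_exists(), free_modinfo_version() and a
 * "struct module_attribute modinfo_version" wired to them, which backs
 * /sys/module/<name>/version.
 */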
 808
 809static char last_unloaded_module[MODULE_NAME_LEN+1];
 810
 811#ifdef CONFIG_MODULE_UNLOAD
 812
 813EXPORT_TRACEPOINT_SYMBOL(module_get);
 814
 815/* MODULE_REF_BASE is the base reference count held by the module loader. */
 816#define MODULE_REF_BASE	1
 817
 818/* Init the unload section of the module. */
 819static int module_unload_init(struct module *mod)
 820{
 821	/*
 822	 * Initialize reference counter to MODULE_REF_BASE.
 823	 * refcnt == 0 means module is going.
 824	 */
 825	atomic_set(&mod->refcnt, MODULE_REF_BASE);
 826
 827	INIT_LIST_HEAD(&mod->source_list);
 828	INIT_LIST_HEAD(&mod->target_list);
 829
 830	/* Hold reference count during initialization. */
 831	atomic_inc(&mod->refcnt);
 832
 833	return 0;
 834}
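
/*
 * Refcount lifecycle (sketch): after module_unload_init() the count is
 * MODULE_REF_BASE + 1 (the base plus the init-time hold).  The extra hold
 * is dropped when initialization completes, and try_release_module_ref()
 * subtracts MODULE_REF_BASE again at unload, so a live module with no
 * users has refcnt == 1 and module_refcount() == 0.
 */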
 835
 836/* Does 'a' already use 'b'? */
 837static int already_uses(struct module *a, struct module *b)
 838{
 839	struct module_use *use;
 840
 841	list_for_each_entry(use, &b->source_list, source_list) {
 842		if (use->source == a) {
 843			pr_debug("%s uses %s!\n", a->name, b->name);
 844			return 1;
 845		}
 846	}
 847	pr_debug("%s does not use %s!\n", a->name, b->name);
 848	return 0;
 849}
 850
 851/*
 852 * Module a uses b
 853 *  - we add 'a' as a "source", 'b' as a "target" of module use
 854 *  - the module_use is added to the list of 'b' sources (so
 855 *    'b' can walk the list to see who sourced them), and of 'a'
 856 *    targets (so 'a' can see what modules it targets).
 857 */
 858static int add_module_usage(struct module *a, struct module *b)
 859{
 860	struct module_use *use;
 861
 862	pr_debug("Allocating new usage for %s.\n", a->name);
 863	use = kmalloc(sizeof(*use), GFP_ATOMIC);
 864	if (!use)
 865		return -ENOMEM;
 866
 867	use->source = a;
 868	use->target = b;
 869	list_add(&use->source_list, &b->source_list);
 870	list_add(&use->target_list, &a->target_list);
 871	return 0;
 872}
 873
 874/* Module a uses b: caller must hold module_mutex */
 875static int ref_module(struct module *a, struct module *b)
 876{
 877	int err;
 878
 879	if (b == NULL || already_uses(a, b))
 880		return 0;
 881
 882	/* If module isn't available, we fail. */
 883	err = strong_try_module_get(b);
 884	if (err)
 885		return err;
 886
 887	err = add_module_usage(a, b);
 888	if (err) {
 889		module_put(b);
 890		return err;
 891	}
 892	return 0;
 893}
 894
 895/* Clear the unload stuff of the module. */
 896static void module_unload_free(struct module *mod)
 897{
 898	struct module_use *use, *tmp;
 899
 900	mutex_lock(&module_mutex);
 901	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
 902		struct module *i = use->target;
 903		pr_debug("%s unusing %s\n", mod->name, i->name);
 904		module_put(i);
 905		list_del(&use->source_list);
 906		list_del(&use->target_list);
 907		kfree(use);
 908	}
 909	mutex_unlock(&module_mutex);
 910}
 911
 912#ifdef CONFIG_MODULE_FORCE_UNLOAD
 913static inline int try_force_unload(unsigned int flags)
 914{
 915	int ret = (flags & O_TRUNC);
 916	if (ret)
 917		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
 918	return ret;
 919}
 920#else
 921static inline int try_force_unload(unsigned int flags)
 922{
 923	return 0;
 924}
 925#endif /* CONFIG_MODULE_FORCE_UNLOAD */
 926
 927/* Try to release the refcount of the module; 0 means success. */
 928static int try_release_module_ref(struct module *mod)
 929{
 930	int ret;
 931
 932	/* Try to decrement the refcnt which we set at load time */
 933	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
 934	BUG_ON(ret < 0);
 935	if (ret)
 936		/* Someone may put it right now; recover by re-adding the base with a check */
 937		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
 938
 939	return ret;
 940}
 941
 942static int try_stop_module(struct module *mod, int flags, int *forced)
 943{
 944	/* If it's not unused, quit unless we're forcing. */
 945	if (try_release_module_ref(mod) != 0) {
 946		*forced = try_force_unload(flags);
 947		if (!(*forced))
 948			return -EWOULDBLOCK;
 949	}
 950
 951	/* Mark it as dying. */
 952	mod->state = MODULE_STATE_GOING;
 953
 954	return 0;
 955}
 956
 957/**
 958 * module_refcount - return the refcount or -1 if unloading
 959 *
 960 * @mod:	the module we're checking
 961 *
 962 * Returns:
 963 *	-1 if the module is in the process of unloading
 964 *	otherwise the number of references in the kernel to the module
 965 */
 966int module_refcount(struct module *mod)
 967{
 968	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
 969}
 970EXPORT_SYMBOL(module_refcount);
 971
 972/* This exists whether we can unload or not */
 973static void free_module(struct module *mod);
 974
 975SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 976		unsigned int, flags)
 977{
 978	struct module *mod;
 979	char name[MODULE_NAME_LEN];
 980	int ret, forced = 0;
 981
 982	if (!capable(CAP_SYS_MODULE) || modules_disabled)
 983		return -EPERM;
 984
 985	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
 986		return -EFAULT;
 987	name[MODULE_NAME_LEN-1] = '\0';
 988
 989	audit_log_kern_module(name);
 990
 991	if (mutex_lock_interruptible(&module_mutex) != 0)
 992		return -EINTR;
 993
 994	mod = find_module(name);
 995	if (!mod) {
 996		ret = -ENOENT;
 997		goto out;
 998	}
 999
1000	if (!list_empty(&mod->source_list)) {
1001		/* Other modules depend on us: get rid of them first. */
1002		ret = -EWOULDBLOCK;
1003		goto out;
1004	}
1005
1006	/* Doing init or already dying? */
1007	if (mod->state != MODULE_STATE_LIVE) {
1008		/* FIXME: if (force), slam module count damn the torpedoes */
1009		pr_debug("%s already dying\n", mod->name);
1010		ret = -EBUSY;
1011		goto out;
1012	}
1013
1014	/* If it has an init func, it must have an exit func to unload */
1015	if (mod->init && !mod->exit) {
1016		forced = try_force_unload(flags);
1017		if (!forced) {
1018			/* This module can't be removed */
1019			ret = -EBUSY;
1020			goto out;
1021		}
1022	}
1023
1024	/* Atomically release the base reference and mark the module as going. */
1025	ret = try_stop_module(mod, flags, &forced);
1026	if (ret != 0)
1027		goto out;
1028
1029	mutex_unlock(&module_mutex);
1030	/* Final destruction now no one is using it. */
1031	if (mod->exit != NULL)
1032		mod->exit();
1033	blocking_notifier_call_chain(&module_notify_list,
1034				     MODULE_STATE_GOING, mod);
1035	klp_module_going(mod);
1036	ftrace_release_mod(mod);
1037
1038	async_synchronize_full();
1039
1040	/* Store the name of the last unloaded module for diagnostic purposes */
1041	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
1042
1043	free_module(mod);
1044	/* someone could wait for the module in add_unformed_module() */
1045	wake_up_all(&module_wq);
1046	return 0;
1047out:
1048	mutex_unlock(&module_mutex);
1049	return ret;
1050}
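
/*
 * Userspace view (illustrative): rmmod reaches this syscall roughly as
 *
 *	syscall(__NR_delete_module, "mymodule", O_NONBLOCK);
 *
 * with O_TRUNC or-ed in to request a forced unload, which is honoured
 * only under CONFIG_MODULE_FORCE_UNLOAD (see try_force_unload() above).
 */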
1051
1052static inline void print_unload_info(struct seq_file *m, struct module *mod)
1053{
1054	struct module_use *use;
1055	int printed_something = 0;
1056
1057	seq_printf(m, " %i ", module_refcount(mod));
1058
1059	/*
1060	 * Always include a trailing , so userspace can differentiate
1061	 * between this and the old multi-field proc format.
1062	 */
1063	list_for_each_entry(use, &mod->source_list, source_list) {
1064		printed_something = 1;
1065		seq_printf(m, "%s,", use->source->name);
1066	}
1067
1068	if (mod->init != NULL && mod->exit == NULL) {
1069		printed_something = 1;
1070		seq_puts(m, "[permanent],");
1071	}
1072
1073	if (!printed_something)
1074		seq_puts(m, "-");
1075}
1076
1077void __symbol_put(const char *symbol)
1078{
1079	struct module *owner;
1080
1081	preempt_disable();
1082	if (!find_symbol(symbol, &owner, NULL, NULL, true, false))
1083		BUG();
1084	module_put(owner);
1085	preempt_enable();
1086}
1087EXPORT_SYMBOL(__symbol_put);
1088
1089/* Note this assumes addr is a function, which it currently always is. */
1090void symbol_put_addr(void *addr)
1091{
1092	struct module *modaddr;
1093	unsigned long a = (unsigned long)dereference_function_descriptor(addr);
1094
1095	if (core_kernel_text(a))
1096		return;
1097
1098	/*
1099	 * Even though we hold a reference on the module, we still need to
1100	 * disable preemption in order to safely traverse the data structure.
1101	 */
1102	preempt_disable();
1103	modaddr = __module_text_address(a);
1104	BUG_ON(!modaddr);
1105	module_put(modaddr);
1106	preempt_enable();
1107}
1108EXPORT_SYMBOL_GPL(symbol_put_addr);
1109
1110static ssize_t show_refcnt(struct module_attribute *mattr,
1111			   struct module_kobject *mk, char *buffer)
1112{
1113	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
1114}
1115
1116static struct module_attribute modinfo_refcnt =
1117	__ATTR(refcnt, 0444, show_refcnt, NULL);
1118
1119void __module_get(struct module *module)
1120{
1121	if (module) {
1122		preempt_disable();
1123		atomic_inc(&module->refcnt);
1124		trace_module_get(module, _RET_IP_);
1125		preempt_enable();
1126	}
1127}
1128EXPORT_SYMBOL(__module_get);
1129
1130bool try_module_get(struct module *module)
1131{
1132	bool ret = true;
1133
1134	if (module) {
1135		preempt_disable();
1136		/* Note: here, we can fail to get a reference */
1137		if (likely(module_is_live(module) &&
1138			   atomic_inc_not_zero(&module->refcnt) != 0))
1139			trace_module_get(module, _RET_IP_);
1140		else
1141			ret = false;
1142
1143		preempt_enable();
1144	}
1145	return ret;
1146}
1147EXPORT_SYMBOL(try_module_get);
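
/*
 * Typical caller pattern (illustrative):
 *
 *	if (!try_module_get(owner))
 *		return -ENODEV;
 *	... safely use owner's code and data ...
 *	module_put(owner);
 */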
1148
1149void module_put(struct module *module)
1150{
1151	int ret;
1152
1153	if (module) {
1154		preempt_disable();
1155		ret = atomic_dec_if_positive(&module->refcnt);
1156		WARN_ON(ret < 0);	/* Failed to put refcount */
1157		trace_module_put(module, _RET_IP_);
1158		preempt_enable();
1159	}
1160}
1161EXPORT_SYMBOL(module_put);
1162
1163#else /* !CONFIG_MODULE_UNLOAD */
1164static inline void print_unload_info(struct seq_file *m, struct module *mod)
1165{
1166	/* We don't know the usage count, or what modules are using it. */
1167	seq_puts(m, " - -");
1168}
1169
1170static inline void module_unload_free(struct module *mod)
1171{
1172}
1173
1174static int ref_module(struct module *a, struct module *b)
1175{
1176	return strong_try_module_get(b);
1177}
1178
1179static inline int module_unload_init(struct module *mod)
1180{
1181	return 0;
1182}
1183#endif /* CONFIG_MODULE_UNLOAD */
1184
1185static size_t module_flags_taint(struct module *mod, char *buf)
1186{
1187	size_t l = 0;
1188	int i;
1189
1190	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
1191		if (taint_flags[i].module && test_bit(i, &mod->taints))
1192			buf[l++] = taint_flags[i].c_true;
1193	}
1194
1195	return l;
1196}
1197
1198static ssize_t show_initstate(struct module_attribute *mattr,
1199			      struct module_kobject *mk, char *buffer)
1200{
1201	const char *state = "unknown";
1202
1203	switch (mk->mod->state) {
1204	case MODULE_STATE_LIVE:
1205		state = "live";
1206		break;
1207	case MODULE_STATE_COMING:
1208		state = "coming";
1209		break;
1210	case MODULE_STATE_GOING:
1211		state = "going";
1212		break;
1213	default:
1214		BUG();
1215	}
1216	return sprintf(buffer, "%s\n", state);
1217}
1218
1219static struct module_attribute modinfo_initstate =
1220	__ATTR(initstate, 0444, show_initstate, NULL);
1221
1222static ssize_t store_uevent(struct module_attribute *mattr,
1223			    struct module_kobject *mk,
1224			    const char *buffer, size_t count)
1225{
1226	int rc;
1227
1228	rc = kobject_synth_uevent(&mk->kobj, buffer, count);
1229	return rc ? rc : count;
1230}
1231
1232struct module_attribute module_uevent =
1233	__ATTR(uevent, 0200, NULL, store_uevent);
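
/*
 * This lets userspace resynthesize a uevent for a module, e.g.
 * (illustrative):
 *
 *	echo add > /sys/module/fuse/uevent
 *
 * kobject_synth_uevent() parses the action string ("add", "change", ...).
 */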
1234
1235static ssize_t show_coresize(struct module_attribute *mattr,
1236			     struct module_kobject *mk, char *buffer)
1237{
1238	return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
1239}
1240
1241static struct module_attribute modinfo_coresize =
1242	__ATTR(coresize, 0444, show_coresize, NULL);
1243
1244static ssize_t show_initsize(struct module_attribute *mattr,
1245			     struct module_kobject *mk, char *buffer)
1246{
1247	return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
1248}
1249
1250static struct module_attribute modinfo_initsize =
1251	__ATTR(initsize, 0444, show_initsize, NULL);
1252
1253static ssize_t show_taint(struct module_attribute *mattr,
1254			  struct module_kobject *mk, char *buffer)
1255{
1256	size_t l;
1257
1258	l = module_flags_taint(mk->mod, buffer);
1259	buffer[l++] = '\n';
1260	return l;
1261}
1262
1263static struct module_attribute modinfo_taint =
1264	__ATTR(taint, 0444, show_taint, NULL);
1265
1266static struct module_attribute *modinfo_attrs[] = {
1267	&module_uevent,
1268	&modinfo_version,
1269	&modinfo_srcversion,
1270	&modinfo_initstate,
1271	&modinfo_coresize,
1272	&modinfo_initsize,
1273	&modinfo_taint,
1274#ifdef CONFIG_MODULE_UNLOAD
1275	&modinfo_refcnt,
1276#endif
1277	NULL,
1278};
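
/*
 * Each attribute whose ->test() hook passes becomes a file under
 * /sys/module/<name>/, e.g. (illustrative):
 *
 *	$ cat /sys/module/ext4/initstate
 *	live
 */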
1279
1280static const char vermagic[] = VERMAGIC_STRING;
1281
1282static int try_to_force_load(struct module *mod, const char *reason)
1283{
1284#ifdef CONFIG_MODULE_FORCE_LOAD
1285	if (!test_taint(TAINT_FORCED_MODULE))
1286		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
1287	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
1288	return 0;
1289#else
1290	return -ENOEXEC;
1291#endif
1292}
1293
1294#ifdef CONFIG_MODVERSIONS
1295
1296static u32 resolve_rel_crc(const s32 *crc)
1297{
1298	return *(u32 *)((void *)crc + *crc);
1299}
1300
1301static int check_version(const struct load_info *info,
1302			 const char *symname,
1303			 struct module *mod,
1304			 const s32 *crc)
1305{
1306	Elf_Shdr *sechdrs = info->sechdrs;
1307	unsigned int versindex = info->index.vers;
1308	unsigned int i, num_versions;
1309	struct modversion_info *versions;
1310
1311	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
1312	if (!crc)
1313		return 1;
1314
1315	/* No versions at all?  modprobe --force does this. */
1316	if (versindex == 0)
1317		return try_to_force_load(mod, symname) == 0;
1318
1319	versions = (void *) sechdrs[versindex].sh_addr;
1320	num_versions = sechdrs[versindex].sh_size
1321		/ sizeof(struct modversion_info);
1322
1323	for (i = 0; i < num_versions; i++) {
1324		u32 crcval;
1325
1326		if (strcmp(versions[i].name, symname) != 0)
1327			continue;
1328
1329		if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
1330			crcval = resolve_rel_crc(crc);
1331		else
1332			crcval = *crc;
1333		if (versions[i].crc == crcval)
1334			return 1;
1335		pr_debug("Found checksum %X vs module %lX\n",
1336			 crcval, versions[i].crc);
1337		goto bad_version;
1338	}
1339
1340	/* Broken toolchain. Warn once, then let it go... */
1341	pr_warn_once("%s: no symbol version for %s\n", info->name, symname);
1342	return 1;
1343
1344bad_version:
1345	pr_warn("%s: disagrees about version of symbol %s\n",
1346	       info->name, symname);
1347	return 0;
1348}
1349
1350static inline int check_modstruct_version(const struct load_info *info,
1351					  struct module *mod)
1352{
1353	const s32 *crc;
1354
1355	/*
1356	 * Since this should be found in kernel (which can't be removed), no
1357	 * locking is necessary -- use preempt_disable() to placate lockdep.
1358	 */
1359	preempt_disable();
1360	if (!find_symbol("module_layout", NULL, &crc, NULL, true, false)) {
1361		preempt_enable();
1362		BUG();
1363	}
1364	preempt_enable();
1365	return check_version(info, "module_layout", mod, crc);
1366}
1367
1368/* First part is the kernel version; we ignore it if the module has crcs. */
1369static inline int same_magic(const char *amagic, const char *bmagic,
1370			     bool has_crcs)
1371{
1372	if (has_crcs) {
1373		amagic += strcspn(amagic, " ");
1374		bmagic += strcspn(bmagic, " ");
1375	}
1376	return strcmp(amagic, bmagic) == 0;
1377}
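
/*
 * Example (illustrative strings): with CRCs the leading release token is
 * skipped, so
 *
 *	"5.8.0 SMP mod_unload modversions"
 *	"5.8.1 SMP mod_unload modversions"
 *
 * compare equal; without CRCs the full vermagic strings must match.
 */
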
1378#else
1379static inline int check_version(const struct load_info *info,
1380				const char *symname,
1381				struct module *mod,
1382				const s32 *crc)
1383{
1384	return 1;
1385}
1386
1387static inline int check_modstruct_version(const struct load_info *info,
1388					  struct module *mod)
1389{
1390	return 1;
1391}
1392
1393static inline int same_magic(const char *amagic, const char *bmagic,
1394			     bool has_crcs)
1395{
1396	return strcmp(amagic, bmagic) == 0;
1397}
1398#endif /* CONFIG_MODVERSIONS */
1399
1400static char *get_modinfo(const struct load_info *info, const char *tag);
1401static char *get_next_modinfo(const struct load_info *info, const char *tag,
1402			      char *prev);
1403
1404static int verify_namespace_is_imported(const struct load_info *info,
1405					const struct kernel_symbol *sym,
1406					struct module *mod)
1407{
1408	const char *namespace;
1409	char *imported_namespace;
1410
1411	namespace = kernel_symbol_namespace(sym);
1412	if (namespace && namespace[0]) {
1413		imported_namespace = get_modinfo(info, "import_ns");
1414		while (imported_namespace) {
1415			if (strcmp(namespace, imported_namespace) == 0)
1416				return 0;
1417			imported_namespace = get_next_modinfo(
1418				info, "import_ns", imported_namespace);
1419		}
1420#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
1421		pr_warn(
1422#else
1423		pr_err(
1424#endif
1425			"%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
1426			mod->name, kernel_symbol_name(sym), namespace);
1427#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
1428		return -EINVAL;
1429#endif
1430	}
1431	return 0;
1432}
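
/*
 * The importing module satisfies this check with MODULE_IMPORT_NS(),
 * e.g. (sketch):
 *
 *	MODULE_IMPORT_NS(USB_STORAGE);
 *
 * which records an "import_ns=USB_STORAGE" modinfo tag for the loop
 * above to match against.
 */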
1433
1434static bool inherit_taint(struct module *mod, struct module *owner)
1435{
1436	if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
1437		return true;
1438
1439	if (mod->using_gplonly_symbols) {
1440		pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n",
1441			mod->name, owner->name);
1442		return false;
1443	}
1444
1445	if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
1446		pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n",
1447			mod->name, owner->name);
1448		set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
1449	}
1450	return true;
1451}
1452
1453/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
1454static const struct kernel_symbol *resolve_symbol(struct module *mod,
1455						  const struct load_info *info,
1456						  const char *name,
1457						  char ownername[])
1458{
1459	struct module *owner;
1460	const struct kernel_symbol *sym;
1461	const s32 *crc;
1462	enum mod_license license;
1463	int err;
1464
1465	/*
1466	 * The module_mutex should not be a heavily contended lock;
1467	 * if we get the occasional sleep here, we'll go an extra iteration
1468	 * in the wait_event_interruptible(), which is harmless.
1469	 */
1470	sched_annotate_sleep();
1471	mutex_lock(&module_mutex);
1472	sym = find_symbol(name, &owner, &crc, &license,
1473			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1474	if (!sym)
1475		goto unlock;
1476
1477	if (license == GPL_ONLY)
1478		mod->using_gplonly_symbols = true;
1479
1480	if (!inherit_taint(mod, owner)) {
1481		sym = NULL;
1482		goto getname;
1483	}
1484
1485	if (!check_version(info, name, mod, crc)) {
1486		sym = ERR_PTR(-EINVAL);
1487		goto getname;
1488	}
1489
1490	err = verify_namespace_is_imported(info, sym, mod);
1491	if (err) {
1492		sym = ERR_PTR(err);
1493		goto getname;
1494	}
1495
1496	err = ref_module(mod, owner);
1497	if (err) {
1498		sym = ERR_PTR(err);
1499		goto getname;
1500	}
1501
1502getname:
1503	/* We must make a copy under the lock in case we failed to get a ref. */
1504	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1505unlock:
1506	mutex_unlock(&module_mutex);
1507	return sym;
1508}
1509
1510static const struct kernel_symbol *
1511resolve_symbol_wait(struct module *mod,
1512		    const struct load_info *info,
1513		    const char *name)
1514{
1515	const struct kernel_symbol *ksym;
1516	char owner[MODULE_NAME_LEN];
1517
1518	if (wait_event_interruptible_timeout(module_wq,
1519			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1520			|| PTR_ERR(ksym) != -EBUSY,
1521					     30 * HZ) <= 0) {
1522		pr_warn("%s: gave up waiting for init of module %s.\n",
1523			mod->name, owner);
1524	}
1525	return ksym;
1526}
1527
1528/*
1529 * /sys/module/foo/sections stuff
1530 * J. Corbet <corbet@lwn.net>
1531 */
1532#ifdef CONFIG_SYSFS
1533
1534#ifdef CONFIG_KALLSYMS
1535static inline bool sect_empty(const Elf_Shdr *sect)
1536{
1537	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1538}
1539
1540struct module_sect_attr {
1541	struct bin_attribute battr;
1542	unsigned long address;
1543};
1544
1545struct module_sect_attrs {
1546	struct attribute_group grp;
1547	unsigned int nsections;
1548	struct module_sect_attr attrs[];
1549};
1550
1551#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
1552static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
1553				struct bin_attribute *battr,
1554				char *buf, loff_t pos, size_t count)
1555{
1556	struct module_sect_attr *sattr =
1557		container_of(battr, struct module_sect_attr, battr);
1558	char bounce[MODULE_SECT_READ_SIZE + 1];
1559	size_t wrote;
1560
1561	if (pos != 0)
1562		return -EINVAL;
1563
1564	/*
1565	 * Since we're a binary read handler, we must account for the
1566	 * trailing NUL byte that sprintf will write: if "buf" is
1567	 * too small to hold the NUL, or the NUL is exactly the last
1568	 * byte, the read will look like it got truncated by one byte.
1569	 * Since there is no way to ask sprintf nicely to not write
1570	 * the NUL, we have to use a bounce buffer.
1571	 */
1572	wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n",
1573			 kallsyms_show_value(file->f_cred)
1574				? (void *)sattr->address : NULL);
1575	count = min(count, wrote);
1576	memcpy(buf, bounce, count);
1577
1578	return count;
1579}
1580
1581static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1582{
1583	unsigned int section;
1584
1585	for (section = 0; section < sect_attrs->nsections; section++)
1586		kfree(sect_attrs->attrs[section].battr.attr.name);
1587	kfree(sect_attrs);
1588}
1589
1590static void add_sect_attrs(struct module *mod, const struct load_info *info)
1591{
1592	unsigned int nloaded = 0, i, size[2];
1593	struct module_sect_attrs *sect_attrs;
1594	struct module_sect_attr *sattr;
1595	struct bin_attribute **gattr;
1596
1597	/* Count loaded sections and allocate structures */
1598	for (i = 0; i < info->hdr->e_shnum; i++)
1599		if (!sect_empty(&info->sechdrs[i]))
1600			nloaded++;
1601	size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
1602			sizeof(sect_attrs->grp.bin_attrs[0]));
1603	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
1604	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1605	if (sect_attrs == NULL)
1606		return;
1607
1608	/* Setup section attributes. */
1609	sect_attrs->grp.name = "sections";
1610	sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
1611
1612	sect_attrs->nsections = 0;
1613	sattr = &sect_attrs->attrs[0];
1614	gattr = &sect_attrs->grp.bin_attrs[0];
1615	for (i = 0; i < info->hdr->e_shnum; i++) {
1616		Elf_Shdr *sec = &info->sechdrs[i];
1617		if (sect_empty(sec))
1618			continue;
1619		sysfs_bin_attr_init(&sattr->battr);
1620		sattr->address = sec->sh_addr;
1621		sattr->battr.attr.name =
1622			kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
1623		if (sattr->battr.attr.name == NULL)
1624			goto out;
1625		sect_attrs->nsections++;
1626		sattr->battr.read = module_sect_read;
1627		sattr->battr.size = MODULE_SECT_READ_SIZE;
1628		sattr->battr.attr.mode = 0400;
1629		*(gattr++) = &(sattr++)->battr;
1630	}
1631	*gattr = NULL;
1632
1633	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1634		goto out;
1635
1636	mod->sect_attrs = sect_attrs;
1637	return;
1638  out:
1639	free_sect_attrs(sect_attrs);
1640}
1641
1642static void remove_sect_attrs(struct module *mod)
1643{
1644	if (mod->sect_attrs) {
1645		sysfs_remove_group(&mod->mkobj.kobj,
1646				   &mod->sect_attrs->grp);
1647		/* We are positive that no one is using any sect attrs
1648		 * at this point.  Deallocate immediately. */
1649		free_sect_attrs(mod->sect_attrs);
1650		mod->sect_attrs = NULL;
1651	}
1652}
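
/*
 * The resulting files expose section load addresses to sufficiently
 * privileged readers, e.g. (illustrative output):
 *
 *	# cat /sys/module/fuse/sections/.text
 *	0xffffffffc0123000
 *
 * Per the kallsyms_show_value() check in module_sect_read(), readers
 * without the required credentials see a NULL (all-zero) address.
 */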
1653
1654/*
1655 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1656 */
1657
1658struct module_notes_attrs {
1659	struct kobject *dir;
1660	unsigned int notes;
1661	struct bin_attribute attrs[];
1662};
1663
1664static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1665				 struct bin_attribute *bin_attr,
1666				 char *buf, loff_t pos, size_t count)
1667{
1668	/*
1669	 * The caller checked the pos and count against our size.
1670	 */
1671	memcpy(buf, bin_attr->private + pos, count);
1672	return count;
1673}
1674
1675static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1676			     unsigned int i)
1677{
1678	if (notes_attrs->dir) {
1679		while (i-- > 0)
1680			sysfs_remove_bin_file(notes_attrs->dir,
1681					      &notes_attrs->attrs[i]);
1682		kobject_put(notes_attrs->dir);
1683	}
1684	kfree(notes_attrs);
1685}
1686
1687static void add_notes_attrs(struct module *mod, const struct load_info *info)
1688{
1689	unsigned int notes, loaded, i;
1690	struct module_notes_attrs *notes_attrs;
1691	struct bin_attribute *nattr;
1692
1693	/* failed to create section attributes, so can't create notes */
1694	if (!mod->sect_attrs)
1695		return;
1696
1697	/* Count notes sections and allocate structures.  */
1698	notes = 0;
1699	for (i = 0; i < info->hdr->e_shnum; i++)
1700		if (!sect_empty(&info->sechdrs[i]) &&
1701		    (info->sechdrs[i].sh_type == SHT_NOTE))
1702			++notes;
1703
1704	if (notes == 0)
1705		return;
1706
1707	notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
1708			      GFP_KERNEL);
1709	if (notes_attrs == NULL)
1710		return;
1711
1712	notes_attrs->notes = notes;
1713	nattr = &notes_attrs->attrs[0];
1714	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1715		if (sect_empty(&info->sechdrs[i]))
1716			continue;
1717		if (info->sechdrs[i].sh_type == SHT_NOTE) {
1718			sysfs_bin_attr_init(nattr);
1719			nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
1720			nattr->attr.mode = S_IRUGO;
1721			nattr->size = info->sechdrs[i].sh_size;
1722			nattr->private = (void *) info->sechdrs[i].sh_addr;
1723			nattr->read = module_notes_read;
1724			++nattr;
1725		}
1726		++loaded;
1727	}
1728
1729	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1730	if (!notes_attrs->dir)
1731		goto out;
1732
1733	for (i = 0; i < notes; ++i)
1734		if (sysfs_create_bin_file(notes_attrs->dir,
1735					  &notes_attrs->attrs[i]))
1736			goto out;
1737
1738	mod->notes_attrs = notes_attrs;
1739	return;
1740
1741  out:
1742	free_notes_attrs(notes_attrs, i);
1743}
1744
1745static void remove_notes_attrs(struct module *mod)
1746{
1747	if (mod->notes_attrs)
1748		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1749}
1750
1751#else
1752
1753static inline void add_sect_attrs(struct module *mod,
1754				  const struct load_info *info)
1755{
1756}
1757
1758static inline void remove_sect_attrs(struct module *mod)
1759{
1760}
1761
1762static inline void add_notes_attrs(struct module *mod,
1763				   const struct load_info *info)
1764{
1765}
1766
1767static inline void remove_notes_attrs(struct module *mod)
1768{
1769}
1770#endif /* CONFIG_KALLSYMS */
1771
1772static void del_usage_links(struct module *mod)
1773{
1774#ifdef CONFIG_MODULE_UNLOAD
1775	struct module_use *use;
1776
1777	mutex_lock(&module_mutex);
1778	list_for_each_entry(use, &mod->target_list, target_list)
1779		sysfs_remove_link(use->target->holders_dir, mod->name);
1780	mutex_unlock(&module_mutex);
1781#endif
1782}
1783
1784static int add_usage_links(struct module *mod)
1785{
1786	int ret = 0;
1787#ifdef CONFIG_MODULE_UNLOAD
1788	struct module_use *use;
1789
1790	mutex_lock(&module_mutex);
1791	list_for_each_entry(use, &mod->target_list, target_list) {
1792		ret = sysfs_create_link(use->target->holders_dir,
1793					&mod->mkobj.kobj, mod->name);
1794		if (ret)
1795			break;
1796	}
1797	mutex_unlock(&module_mutex);
1798	if (ret)
1799		del_usage_links(mod);
1800#endif
1801	return ret;
1802}
1803
1804static void module_remove_modinfo_attrs(struct module *mod, int end);
1805
1806static int module_add_modinfo_attrs(struct module *mod)
1807{
1808	struct module_attribute *attr;
1809	struct module_attribute *temp_attr;
1810	int error = 0;
1811	int i;
1812
1813	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1814					(ARRAY_SIZE(modinfo_attrs) + 1)),
1815					GFP_KERNEL);
1816	if (!mod->modinfo_attrs)
1817		return -ENOMEM;
1818
1819	temp_attr = mod->modinfo_attrs;
1820	for (i = 0; (attr = modinfo_attrs[i]); i++) {
1821		if (!attr->test || attr->test(mod)) {
1822			memcpy(temp_attr, attr, sizeof(*temp_attr));
1823			sysfs_attr_init(&temp_attr->attr);
1824			error = sysfs_create_file(&mod->mkobj.kobj,
1825					&temp_attr->attr);
1826			if (error)
1827				goto error_out;
1828			++temp_attr;
1829		}
1830	}
1831
1832	return 0;
1833
1834error_out:
1835	if (i > 0)
1836		module_remove_modinfo_attrs(mod, --i);
1837	else
1838		kfree(mod->modinfo_attrs);
1839	return error;
1840}
1841
1842static void module_remove_modinfo_attrs(struct module *mod, int end)
1843{
1844	struct module_attribute *attr;
1845	int i;
1846
1847	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1848		if (end >= 0 && i > end)
1849			break;
1850		/* pick a field to test for end of list */
1851		if (!attr->attr.name)
1852			break;
1853		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
1854		if (attr->free)
1855			attr->free(mod);
1856	}
1857	kfree(mod->modinfo_attrs);
1858}
1859
1860static void mod_kobject_put(struct module *mod)
1861{
1862	DECLARE_COMPLETION_ONSTACK(c);
1863	mod->mkobj.kobj_completion = &c;
1864	kobject_put(&mod->mkobj.kobj);
1865	wait_for_completion(&c);
1866}
1867
1868static int mod_sysfs_init(struct module *mod)
1869{
1870	int err;
1871	struct kobject *kobj;
1872
1873	if (!module_sysfs_initialized) {
1874		pr_err("%s: module sysfs not initialized\n", mod->name);
1875		err = -EINVAL;
1876		goto out;
1877	}
1878
1879	kobj = kset_find_obj(module_kset, mod->name);
1880	if (kobj) {
1881		pr_err("%s: module is already loaded\n", mod->name);
1882		kobject_put(kobj);
1883		err = -EINVAL;
1884		goto out;
1885	}
1886
1887	mod->mkobj.mod = mod;
1888
1889	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1890	mod->mkobj.kobj.kset = module_kset;
1891	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1892				   "%s", mod->name);
1893	if (err)
1894		mod_kobject_put(mod);
1895
1896	/* delay uevent until full sysfs population */
1897out:
1898	return err;
1899}
1900
1901static int mod_sysfs_setup(struct module *mod,
1902			   const struct load_info *info,
1903			   struct kernel_param *kparam,
1904			   unsigned int num_params)
1905{
1906	int err;
1907
1908	err = mod_sysfs_init(mod);
1909	if (err)
1910		goto out;
1911
1912	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1913	if (!mod->holders_dir) {
1914		err = -ENOMEM;
1915		goto out_unreg;
1916	}
1917
1918	err = module_param_sysfs_setup(mod, kparam, num_params);
1919	if (err)
1920		goto out_unreg_holders;
1921
1922	err = module_add_modinfo_attrs(mod);
1923	if (err)
1924		goto out_unreg_param;
1925
1926	err = add_usage_links(mod);
1927	if (err)
1928		goto out_unreg_modinfo_attrs;
1929
1930	add_sect_attrs(mod, info);
1931	add_notes_attrs(mod, info);
1932
1933	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1934	return 0;
1935
1936out_unreg_modinfo_attrs:
1937	module_remove_modinfo_attrs(mod, -1);
1938out_unreg_param:
1939	module_param_sysfs_remove(mod);
1940out_unreg_holders:
1941	kobject_put(mod->holders_dir);
1942out_unreg:
1943	mod_kobject_put(mod);
1944out:
1945	return err;
1946}
1947
1948static void mod_sysfs_fini(struct module *mod)
1949{
1950	remove_notes_attrs(mod);
1951	remove_sect_attrs(mod);
1952	mod_kobject_put(mod);
1953}
1954
1955static void init_param_lock(struct module *mod)
1956{
1957	mutex_init(&mod->param_lock);
1958}
1959#else /* !CONFIG_SYSFS */
1960
1961static int mod_sysfs_setup(struct module *mod,
1962			   const struct load_info *info,
1963			   struct kernel_param *kparam,
1964			   unsigned int num_params)
1965{
1966	return 0;
1967}
1968
1969static void mod_sysfs_fini(struct module *mod)
1970{
1971}
1972
1973static void module_remove_modinfo_attrs(struct module *mod, int end)
1974{
1975}
1976
1977static void del_usage_links(struct module *mod)
1978{
1979}
1980
1981static void init_param_lock(struct module *mod)
1982{
1983}
1984#endif /* CONFIG_SYSFS */
1985
1986static void mod_sysfs_teardown(struct module *mod)
1987{
1988	del_usage_links(mod);
1989	module_remove_modinfo_attrs(mod, -1);
1990	module_param_sysfs_remove(mod);
1991	kobject_put(mod->mkobj.drivers_dir);
1992	kobject_put(mod->holders_dir);
1993	mod_sysfs_fini(mod);
1994}
1995
1996/*
1997 * LKM RO/NX protection: protect module's text/ro-data
1998 * from modification and any data from execution.
1999 *
2000 * General layout of module is:
2001 *          [text] [read-only-data] [ro-after-init] [writable data]
2002 * text_size -----^                ^               ^               ^
2003 * ro_size ------------------------|               |               |
2004 * ro_after_init_size -----------------------------|               |
2005 * size -----------------------------------------------------------|
2006 *
2007 * These values are always page-aligned (as is base)
2008 */
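
/*
 * Worked example (illustrative sizes): with text_size = 0x3000,
 * ro_size = 0x5000, ro_after_init_size = 0x6000 and size = 0x8000,
 * frob_rodata() below operates on [base + 0x3000, base + 0x5000) and
 * frob_writable_data() on [base + 0x6000, base + 0x8000).
 */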
2009
2010/*
2011 * Since some arches are moving towards PAGE_KERNEL module allocations instead
2012 * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() outside of the
2013 * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of
2014 * whether we are strict.
2015 */
2016#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
2017static void frob_text(const struct module_layout *layout,
2018		      int (*set_memory)(unsigned long start, int num_pages))
2019{
2020	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2021	BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
2022	set_memory((unsigned long)layout->base,
2023		   layout->text_size >> PAGE_SHIFT);
2024}
2025
2026static void module_enable_x(const struct module *mod)
2027{
2028	frob_text(&mod->core_layout, set_memory_x);
2029	frob_text(&mod->init_layout, set_memory_x);
2030}
2031#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2032static void module_enable_x(const struct module *mod) { }
2033#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2034
2035#ifdef CONFIG_STRICT_MODULE_RWX
2036static void frob_rodata(const struct module_layout *layout,
2037			int (*set_memory)(unsigned long start, int num_pages))
2038{
2039	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2040	BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
2041	BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
2042	set_memory((unsigned long)layout->base + layout->text_size,
2043		   (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
2044}
2045
2046static void frob_ro_after_init(const struct module_layout *layout,
2047				int (*set_memory)(unsigned long start, int num_pages))
2048{
2049	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2050	BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
2051	BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
2052	set_memory((unsigned long)layout->base + layout->ro_size,
2053		   (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
2054}
2055
2056static void frob_writable_data(const struct module_layout *layout,
2057			       int (*set_memory)(unsigned long start, int num_pages))
2058{
2059	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2060	BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
2061	BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1));
2062	set_memory((unsigned long)layout->base + layout->ro_after_init_size,
2063		   (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
2064}
2065
2066static void module_enable_ro(const struct module *mod, bool after_init)
2067{
2068	if (!rodata_enabled)
2069		return;
2070
2071	set_vm_flush_reset_perms(mod->core_layout.base);
2072	set_vm_flush_reset_perms(mod->init_layout.base);
2073	frob_text(&mod->core_layout, set_memory_ro);
2074
2075	frob_rodata(&mod->core_layout, set_memory_ro);
2076	frob_text(&mod->init_layout, set_memory_ro);
2077	frob_rodata(&mod->init_layout, set_memory_ro);
2078
2079	if (after_init)
2080		frob_ro_after_init(&mod->core_layout, set_memory_ro);
2081}
2082
2083static void module_enable_nx(const struct module *mod)
2084{
2085	frob_rodata(&mod->core_layout, set_memory_nx);
2086	frob_ro_after_init(&mod->core_layout, set_memory_nx);
2087	frob_writable_data(&mod->core_layout, set_memory_nx);
2088	frob_rodata(&mod->init_layout, set_memory_nx);
2089	frob_writable_data(&mod->init_layout, set_memory_nx);
2090}
2091
2092static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2093				       char *secstrings, struct module *mod)
2094{
2095	const unsigned long shf_wx = SHF_WRITE|SHF_EXECINSTR;
2096	int i;
2097
2098	for (i = 0; i < hdr->e_shnum; i++) {
2099		if ((sechdrs[i].sh_flags & shf_wx) == shf_wx)
2100			return -ENOEXEC;
2101	}
2102
2103	return 0;
2104}
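/*
 * Example (hypothetical section, for illustration only): an object file
 * carrying a section with sh_flags = SHF_ALLOC | SHF_WRITE | SHF_EXECINSTR
 * asks for memory that is both writable and executable; the loop above
 * spots that combination and rejects the whole module with -ENOEXEC.
 */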
2105
2106#else /* !CONFIG_STRICT_MODULE_RWX */
2107static void module_enable_nx(const struct module *mod) { }
2108static void module_enable_ro(const struct module *mod, bool after_init) {}
2109static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2110				       char *secstrings, struct module *mod)
2111{
2112	return 0;
2113}
2114#endif /*  CONFIG_STRICT_MODULE_RWX */
2115
2116#ifdef CONFIG_LIVEPATCH
2117/*
2118 * Persist Elf information about a module. Copy the Elf header,
2119 * section header table, section string table, and symtab section
2120 * index from info to mod->klp_info.
2121 */
2122static int copy_module_elf(struct module *mod, struct load_info *info)
2123{
2124	unsigned int size, symndx;
2125	int ret;
2126
2127	size = sizeof(*mod->klp_info);
2128	mod->klp_info = kmalloc(size, GFP_KERNEL);
2129	if (mod->klp_info == NULL)
2130		return -ENOMEM;
2131
2132	/* Elf header */
2133	size = sizeof(mod->klp_info->hdr);
2134	memcpy(&mod->klp_info->hdr, info->hdr, size);
2135
2136	/* Elf section header table */
2137	size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
2138	mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL);
2139	if (mod->klp_info->sechdrs == NULL) {
2140		ret = -ENOMEM;
2141		goto free_info;
2142	}
2143
2144	/* Elf section name string table */
2145	size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
2146	mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL);
2147	if (mod->klp_info->secstrings == NULL) {
2148		ret = -ENOMEM;
2149		goto free_sechdrs;
2150	}
2151
2152	/* Elf symbol section index */
2153	symndx = info->index.sym;
2154	mod->klp_info->symndx = symndx;
2155
2156	/*
2157	 * For livepatch modules, core_kallsyms.symtab is a complete
2158	 * copy of the original symbol table. Adjust sh_addr to point
2159	 * to core_kallsyms.symtab since the copy of the symtab in module
2160	 * init memory is freed at the end of do_init_module().
2161	 */
2162	mod->klp_info->sechdrs[symndx].sh_addr = \
2163		(unsigned long) mod->core_kallsyms.symtab;
2164
2165	return 0;
2166
2167free_sechdrs:
2168	kfree(mod->klp_info->sechdrs);
2169free_info:
2170	kfree(mod->klp_info);
2171	return ret;
2172}
2173
2174static void free_module_elf(struct module *mod)
2175{
2176	kfree(mod->klp_info->sechdrs);
2177	kfree(mod->klp_info->secstrings);
2178	kfree(mod->klp_info);
2179}
2180#else /* !CONFIG_LIVEPATCH */
2181static int copy_module_elf(struct module *mod, struct load_info *info)
2182{
2183	return 0;
2184}
2185
2186static void free_module_elf(struct module *mod)
2187{
2188}
2189#endif /* CONFIG_LIVEPATCH */
2190
2191void __weak module_memfree(void *module_region)
2192{
2193	/*
2194	 * This memory may be RO, and freeing RO memory in an interrupt is not
2195	 * supported by vmalloc.
2196	 */
2197	WARN_ON(in_interrupt());
2198	vfree(module_region);
2199}
2200
2201void __weak module_arch_cleanup(struct module *mod)
2202{
2203}
2204
2205void __weak module_arch_freeing_init(struct module *mod)
2206{
2207}
2208
2209/* Free a module, remove from lists, etc. */
2210static void free_module(struct module *mod)
2211{
2212	trace_module_free(mod);
2213
2214	mod_sysfs_teardown(mod);
2215
2216	/* We leave it in list to prevent duplicate loads, but make sure
2217	 * that no one uses it while it's being deconstructed. */
2218	mutex_lock(&module_mutex);
2219	mod->state = MODULE_STATE_UNFORMED;
2220	mutex_unlock(&module_mutex);
2221
2222	/* Remove dynamic debug info */
2223	ddebug_remove_module(mod->name);
2224
2225	/* Arch-specific cleanup. */
2226	module_arch_cleanup(mod);
2227
2228	/* Module unload stuff */
2229	module_unload_free(mod);
2230
2231	/* Free any allocated parameters. */
2232	destroy_params(mod->kp, mod->num_kp);
2233
2234	if (is_livepatch_module(mod))
2235		free_module_elf(mod);
2236
2237	/* Now we can delete it from the lists */
2238	mutex_lock(&module_mutex);
2239	/* Unlink carefully: kallsyms could be walking list. */
2240	list_del_rcu(&mod->list);
2241	mod_tree_remove(mod);
2242	/* Remove this module from bug list, this uses list_del_rcu */
2243	module_bug_cleanup(mod);
2244	/* Wait for RCU-sched synchronization before releasing mod->list and buglist. */
2245	synchronize_rcu();
2246	mutex_unlock(&module_mutex);
2247
2248	/* This may be empty, but that's OK */
2249	module_arch_freeing_init(mod);
2250	module_memfree(mod->init_layout.base);
2251	kfree(mod->args);
2252	percpu_modfree(mod);
2253
2254	/* Free lock-classes; relies on the preceding sync_rcu(). */
2255	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
2256
2257	/* Finally, free the core (containing the module structure) */
2258	module_memfree(mod->core_layout.base);
2259}
2260
2261void *__symbol_get(const char *symbol)
2262{
2263	struct module *owner;
2264	const struct kernel_symbol *sym;
2265
2266	preempt_disable();
2267	sym = find_symbol(symbol, &owner, NULL, NULL, true, true);
2268	if (sym && strong_try_module_get(owner))
2269		sym = NULL;
2270	preempt_enable();
2271
2272	return sym ? (void *)kernel_symbol_value(sym) : NULL;
2273}
2274EXPORT_SYMBOL_GPL(__symbol_get);
2275
2276/*
2277 * Ensure that an exported symbol [global namespace] does not already exist
2278 * in the kernel or in some other module's exported symbol table.
2279 *
2280 * You must hold the module_mutex.
2281 */
2282static int verify_exported_symbols(struct module *mod)
2283{
2284	unsigned int i;
2285	struct module *owner;
2286	const struct kernel_symbol *s;
2287	struct {
2288		const struct kernel_symbol *sym;
2289		unsigned int num;
2290	} arr[] = {
2291		{ mod->syms, mod->num_syms },
2292		{ mod->gpl_syms, mod->num_gpl_syms },
2293		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
2294#ifdef CONFIG_UNUSED_SYMBOLS
2295		{ mod->unused_syms, mod->num_unused_syms },
2296		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
2297#endif
2298	};
2299
2300	for (i = 0; i < ARRAY_SIZE(arr); i++) {
2301		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
2302			if (find_symbol(kernel_symbol_name(s), &owner, NULL,
2303					NULL, true, false)) {
2304				pr_err("%s: exports duplicate symbol %s"
2305				       " (owned by %s)\n",
2306				       mod->name, kernel_symbol_name(s),
2307				       module_name(owner));
2308				return -ENOEXEC;
2309			}
2310		}
2311	}
2312	return 0;
2313}
2314
2315/* Change all symbols so that st_value encodes the pointer directly. */
2316static int simplify_symbols(struct module *mod, const struct load_info *info)
2317{
2318	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2319	Elf_Sym *sym = (void *)symsec->sh_addr;
2320	unsigned long secbase;
2321	unsigned int i;
2322	int ret = 0;
2323	const struct kernel_symbol *ksym;
2324
2325	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
2326		const char *name = info->strtab + sym[i].st_name;
2327
2328		switch (sym[i].st_shndx) {
2329		case SHN_COMMON:
2330			/* Ignore common symbols */
2331			if (!strncmp(name, "__gnu_lto", 9))
2332				break;
2333
2334			/* We compiled with -fno-common.  These are not
2335			   supposed to happen.  */
2336			pr_debug("Common symbol: %s\n", name);
2337			pr_warn("%s: please compile with -fno-common\n",
2338			       mod->name);
2339			ret = -ENOEXEC;
2340			break;
2341
2342		case SHN_ABS:
2343			/* Don't need to do anything */
2344			pr_debug("Absolute symbol: 0x%08lx\n",
2345			       (long)sym[i].st_value);
2346			break;
2347
2348		case SHN_LIVEPATCH:
2349			/* Livepatch symbols are resolved by livepatch */
2350			break;
2351
2352		case SHN_UNDEF:
2353			ksym = resolve_symbol_wait(mod, info, name);
2354			/* Ok if resolved.  */
2355			if (ksym && !IS_ERR(ksym)) {
2356				sym[i].st_value = kernel_symbol_value(ksym);
2357				break;
2358			}
2359
2360			/* Ok if weak.  */
2361			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
2362				break;
2363
2364			ret = PTR_ERR(ksym) ?: -ENOENT;
2365			pr_warn("%s: Unknown symbol %s (err %d)\n",
2366				mod->name, name, ret);
2367			break;
2368
2369		default:
2370			/* Divert to percpu allocation if a percpu var. */
2371			if (sym[i].st_shndx == info->index.pcpu)
2372				secbase = (unsigned long)mod_percpu(mod);
2373			else
2374				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
2375			sym[i].st_value += secbase;
2376			break;
2377		}
2378	}
2379
2380	return ret;
2381}
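/*
 * Worked example (addresses are made up): a symbol defined at offset 0x40
 * into section 5 arrives with st_value == 0x40.  If, after move_module(),
 * sechdrs[5].sh_addr == 0xffffffffc0002000, the default case above turns
 * st_value into 0xffffffffc0002040, i.e. the symbol's final address.
 */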
2382
2383static int apply_relocations(struct module *mod, const struct load_info *info)
2384{
2385	unsigned int i;
2386	int err = 0;
2387
2388	/* Now do relocations. */
2389	for (i = 1; i < info->hdr->e_shnum; i++) {
2390		unsigned int infosec = info->sechdrs[i].sh_info;
2391
2392		/* Not a valid relocation section? */
2393		if (infosec >= info->hdr->e_shnum)
2394			continue;
2395
2396		/* Don't bother with non-allocated sections */
2397		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2398			continue;
2399
2400		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
2401			err = klp_apply_section_relocs(mod, info->sechdrs,
2402						       info->secstrings,
2403						       info->strtab,
2404						       info->index.sym, i,
2405						       NULL);
2406		else if (info->sechdrs[i].sh_type == SHT_REL)
2407			err = apply_relocate(info->sechdrs, info->strtab,
2408					     info->index.sym, i, mod);
2409		else if (info->sechdrs[i].sh_type == SHT_RELA)
2410			err = apply_relocate_add(info->sechdrs, info->strtab,
2411						 info->index.sym, i, mod);
2412		if (err < 0)
2413			break;
2414	}
2415	return err;
2416}
2417
2418/* Additional bytes needed by arch in front of individual sections */
2419unsigned int __weak arch_mod_section_prepend(struct module *mod,
2420					     unsigned int section)
2421{
2422	/* default implementation just returns zero */
2423	return 0;
2424}
2425
2426/* Update size with this section: return offset. */
2427static long get_offset(struct module *mod, unsigned int *size,
2428		       Elf_Shdr *sechdr, unsigned int section)
2429{
2430	long ret;
2431
2432	*size += arch_mod_section_prepend(mod, section);
2433	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2434	*size = ret + sechdr->sh_size;
2435	return ret;
2436}
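/*
 * Worked example (made-up numbers, assuming arch_mod_section_prepend()
 * returns 0): with *size == 10, sh_addralign == 8 and sh_size == 100,
 * ALIGN(10, 8) returns 16, so the section is placed at offset 16 and
 * *size becomes 116.  The "?: 1" treats sh_addralign == 0 as 1-byte
 * alignment.
 */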
2437
2438/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2439   might -- code, read-only data, read-write data, small data.  Tally
2440   sizes, and place the offsets into sh_entsize fields: high bit means it
2441   belongs in init. */
2442static void layout_sections(struct module *mod, struct load_info *info)
2443{
2444	static unsigned long const masks[][2] = {
2445		/* NOTE: all executable code must be the first section
2446		 * in this array; otherwise modify the text_size
2447		 * finder in the two loops below */
2448		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2449		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2450		{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
2451		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2452		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2453	};
2454	unsigned int m, i;
2455
2456	for (i = 0; i < info->hdr->e_shnum; i++)
2457		info->sechdrs[i].sh_entsize = ~0UL;
2458
2459	pr_debug("Core section allocation order:\n");
2460	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2461		for (i = 0; i < info->hdr->e_shnum; ++i) {
2462			Elf_Shdr *s = &info->sechdrs[i];
2463			const char *sname = info->secstrings + s->sh_name;
2464
2465			if ((s->sh_flags & masks[m][0]) != masks[m][0]
2466			    || (s->sh_flags & masks[m][1])
2467			    || s->sh_entsize != ~0UL
2468			    || module_init_section(sname))
2469				continue;
2470			s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
2471			pr_debug("\t%s\n", sname);
2472		}
2473		switch (m) {
2474		case 0: /* executable */
2475			mod->core_layout.size = debug_align(mod->core_layout.size);
2476			mod->core_layout.text_size = mod->core_layout.size;
2477			break;
2478		case 1: /* RO: text and ro-data */
2479			mod->core_layout.size = debug_align(mod->core_layout.size);
2480			mod->core_layout.ro_size = mod->core_layout.size;
2481			break;
2482		case 2: /* RO after init */
2483			mod->core_layout.size = debug_align(mod->core_layout.size);
2484			mod->core_layout.ro_after_init_size = mod->core_layout.size;
2485			break;
2486		case 4: /* whole core */
2487			mod->core_layout.size = debug_align(mod->core_layout.size);
2488			break;
2489		}
2490	}
2491
2492	pr_debug("Init section allocation order:\n");
2493	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2494		for (i = 0; i < info->hdr->e_shnum; ++i) {
2495			Elf_Shdr *s = &info->sechdrs[i];
2496			const char *sname = info->secstrings + s->sh_name;
2497
2498			if ((s->sh_flags & masks[m][0]) != masks[m][0]
2499			    || (s->sh_flags & masks[m][1])
2500			    || s->sh_entsize != ~0UL
2501			    || !module_init_section(sname))
2502				continue;
2503			s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
2504					 | INIT_OFFSET_MASK);
2505			pr_debug("\t%s\n", sname);
2506		}
2507		switch (m) {
2508		case 0: /* executable */
2509			mod->init_layout.size = debug_align(mod->init_layout.size);
2510			mod->init_layout.text_size = mod->init_layout.size;
2511			break;
2512		case 1: /* RO: text and ro-data */
2513			mod->init_layout.size = debug_align(mod->init_layout.size);
2514			mod->init_layout.ro_size = mod->init_layout.size;
2515			break;
2516		case 2:
2517			/*
2518			 * RO after init doesn't apply to init_layout (only
2519			 * core_layout), so it just takes the value of ro_size.
2520			 */
2521			mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
2522			break;
2523		case 4: /* whole init */
2524			mod->init_layout.size = debug_align(mod->init_layout.size);
2525			break;
2526		}
2527	}
2528}
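/*
 * Example pass over a typical module (illustrative): ".text"
 * (SHF_EXECINSTR|SHF_ALLOC) is claimed in pass m == 0, ".rodata"
 * (SHF_ALLOC without SHF_WRITE) in pass m == 1, ".data..ro_after_init"
 * in pass m == 2, and ".data" (SHF_WRITE|SHF_ALLOC) in pass m == 3.
 * Since a claimed section's sh_entsize is no longer ~0UL, later passes
 * skip it.  Init sections additionally get INIT_OFFSET_MASK (the high
 * bit) OR-ed into their offset, which move_module() later uses to pick
 * init_layout over core_layout.
 */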
2529
2530static void set_license(struct module *mod, const char *license)
2531{
2532	if (!license)
2533		license = "unspecified";
2534
2535	if (!license_is_gpl_compatible(license)) {
2536		if (!test_taint(TAINT_PROPRIETARY_MODULE))
2537			pr_warn("%s: module license '%s' taints kernel.\n",
2538				mod->name, license);
2539		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2540				 LOCKDEP_NOW_UNRELIABLE);
2541	}
2542}
2543
2544/* Parse tag=value strings from .modinfo section */
2545static char *next_string(char *string, unsigned long *secsize)
2546{
2547	/* Skip non-zero chars */
2548	while (string[0]) {
2549		string++;
2550		if ((*secsize)-- <= 1)
2551			return NULL;
2552	}
2553
2554	/* Skip any zero padding. */
2555	while (!string[0]) {
2556		string++;
2557		if ((*secsize)-- <= 1)
2558			return NULL;
2559	}
2560	return string;
2561}
2562
2563static char *get_next_modinfo(const struct load_info *info, const char *tag,
2564			      char *prev)
2565{
2566	char *p;
2567	unsigned int taglen = strlen(tag);
2568	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2569	unsigned long size = infosec->sh_size;
2570
2571	/*
2572	 * get_modinfo() calls made before rewrite_section_headers()
2573	 * must use sh_offset, as sh_addr isn't set!
2574	 */
2575	char *modinfo = (char *)info->hdr + infosec->sh_offset;
2576
2577	if (prev) {
2578		size -= prev - modinfo;
2579		modinfo = next_string(prev, &size);
2580	}
2581
2582	for (p = modinfo; p; p = next_string(p, &size)) {
2583		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2584			return p + taglen + 1;
2585	}
2586	return NULL;
2587}
2588
2589static char *get_modinfo(const struct load_info *info, const char *tag)
2590{
2591	return get_next_modinfo(info, tag, NULL);
2592}
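/*
 * Illustrative .modinfo contents (not from a real module): the section
 * is a run of NUL-terminated "tag=value" strings, e.g.
 *
 *   license=GPL\0author=A. Hacker\0\0vermagic=5.7.0 SMP mod_unload \0
 *
 * get_modinfo(info, "license") then returns a pointer to "GPL";
 * next_string() skips the current string plus any NUL padding between
 * entries.
 */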
2593
2594static void setup_modinfo(struct module *mod, struct load_info *info)
2595{
2596	struct module_attribute *attr;
2597	int i;
2598
2599	for (i = 0; (attr = modinfo_attrs[i]); i++) {
2600		if (attr->setup)
2601			attr->setup(mod, get_modinfo(info, attr->attr.name));
2602	}
2603}
2604
2605static void free_modinfo(struct module *mod)
2606{
2607	struct module_attribute *attr;
2608	int i;
2609
2610	for (i = 0; (attr = modinfo_attrs[i]); i++) {
2611		if (attr->free)
2612			attr->free(mod);
2613	}
2614}
2615
2616#ifdef CONFIG_KALLSYMS
2617
2618/* Lookup exported symbol in given range of kernel_symbols */
2619static const struct kernel_symbol *lookup_exported_symbol(const char *name,
2620							  const struct kernel_symbol *start,
2621							  const struct kernel_symbol *stop)
2622{
2623	return bsearch(name, start, stop - start,
2624			sizeof(struct kernel_symbol), cmp_name);
2625}
2626
2627static int is_exported(const char *name, unsigned long value,
2628		       const struct module *mod)
2629{
2630	const struct kernel_symbol *ks;
2631	if (!mod)
2632		ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab);
2633	else
2634		ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms);
2635
2636	return ks != NULL && kernel_symbol_value(ks) == value;
2637}
2638
2639/* As per nm */
2640static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2641{
2642	const Elf_Shdr *sechdrs = info->sechdrs;
2643
2644	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2645		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2646			return 'v';
2647		else
2648			return 'w';
2649	}
2650	if (sym->st_shndx == SHN_UNDEF)
2651		return 'U';
2652	if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
2653		return 'a';
2654	if (sym->st_shndx >= SHN_LORESERVE)
2655		return '?';
2656	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2657		return 't';
2658	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2659	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2660		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2661			return 'r';
2662		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2663			return 'g';
2664		else
2665			return 'd';
2666	}
2667	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2668		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2669			return 's';
2670		else
2671			return 'b';
2672	}
2673	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2674		      ".debug")) {
2675		return 'n';
2676	}
2677	return '?';
2678}
2679
2680static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2681			unsigned int shnum, unsigned int pcpundx)
2682{
2683	const Elf_Shdr *sec;
2684
2685	if (src->st_shndx == SHN_UNDEF
2686	    || src->st_shndx >= shnum
2687	    || !src->st_name)
2688		return false;
2689
2690#ifdef CONFIG_KALLSYMS_ALL
2691	if (src->st_shndx == pcpundx)
2692		return true;
2693#endif
2694
2695	sec = sechdrs + src->st_shndx;
2696	if (!(sec->sh_flags & SHF_ALLOC)
2697#ifndef CONFIG_KALLSYMS_ALL
2698	    || !(sec->sh_flags & SHF_EXECINSTR)
2699#endif
2700	    || (sec->sh_entsize & INIT_OFFSET_MASK))
2701		return false;
2702
2703	return true;
2704}
2705
2706/*
2707 * We only allocate and copy the strings needed by the parts of symtab
2708 * we keep.  This is simple, but has the effect of making multiple
2709 * copies of duplicates.  We could be more sophisticated, see
2710 * linux-kernel thread starting with
2711 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2712 */
2713static void layout_symtab(struct module *mod, struct load_info *info)
2714{
2715	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2716	Elf_Shdr *strsect = info->sechdrs + info->index.str;
2717	const Elf_Sym *src;
2718	unsigned int i, nsrc, ndst, strtab_size = 0;
2719
2720	/* Put symbol section at end of init part of module. */
2721	symsect->sh_flags |= SHF_ALLOC;
2722	symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect,
2723					 info->index.sym) | INIT_OFFSET_MASK;
2724	pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2725
2726	src = (void *)info->hdr + symsect->sh_offset;
2727	nsrc = symsect->sh_size / sizeof(*src);
2728
2729	/* Compute total space required for the core symbols' strtab. */
2730	for (ndst = i = 0; i < nsrc; i++) {
2731		if (i == 0 || is_livepatch_module(mod) ||
2732		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2733				   info->index.pcpu)) {
2734			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2735			ndst++;
2736		}
2737	}
2738
2739	/* Append room for core symbols at end of core part. */
2740	info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
2741	info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
2742	mod->core_layout.size += strtab_size;
2743	info->core_typeoffs = mod->core_layout.size;
2744	mod->core_layout.size += ndst * sizeof(char);
2745	mod->core_layout.size = debug_align(mod->core_layout.size);
2746
2747	/* Put string table section at end of init part of module. */
2748	strsect->sh_flags |= SHF_ALLOC;
2749	strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
2750					 info->index.str) | INIT_OFFSET_MASK;
2751	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2752
2753	/* We'll tack temporary mod_kallsyms on the end. */
2754	mod->init_layout.size = ALIGN(mod->init_layout.size,
2755				      __alignof__(struct mod_kallsyms));
2756	info->mod_kallsyms_init_off = mod->init_layout.size;
2757	mod->init_layout.size += sizeof(struct mod_kallsyms);
2758	info->init_typeoffs = mod->init_layout.size;
2759	mod->init_layout.size += nsrc * sizeof(char);
2760	mod->init_layout.size = debug_align(mod->init_layout.size);
2761}
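/*
 * Sketch of the space reserved above (for exposition): the core area
 * grows a cut-down Elf_Sym array at info->symoffs, its strings at
 * info->stroffs and one type character per kept symbol at
 * info->core_typeoffs; the init area gains the full symtab and strtab,
 * a full typetab at info->init_typeoffs, and a struct mod_kallsyms at
 * info->mod_kallsyms_init_off.  The init copies are discarded with the
 * rest of init memory once do_init_module() switches mod->kallsyms over
 * to the core copy.
 */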
2762
2763/*
2764 * We use the full symtab and strtab which layout_symtab arranged to
2765 * be appended to the init section.  Later we switch to the cut-down
2766 * core-only ones.
2767 */
2768static void add_kallsyms(struct module *mod, const struct load_info *info)
2769{
2770	unsigned int i, ndst;
2771	const Elf_Sym *src;
2772	Elf_Sym *dst;
2773	char *s;
2774	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2775
2776	/* Set up to point into init section. */
2777	mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
2778
2779	mod->kallsyms->symtab = (void *)symsec->sh_addr;
2780	mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2781	/* Make sure we get permanent strtab: don't use info->strtab. */
2782	mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2783	mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs;
2784
2785	/*
2786	 * Now populate the cut down core kallsyms for after init
2787	 * and set types up while we still have access to sections.
2788	 */
2789	mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
2790	mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
2791	mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs;
2792	src = mod->kallsyms->symtab;
2793	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2794		mod->kallsyms->typetab[i] = elf_type(src + i, info);
2795		if (i == 0 || is_livepatch_module(mod) ||
2796		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2797				   info->index.pcpu)) {
2798			mod->core_kallsyms.typetab[ndst] =
2799			    mod->kallsyms->typetab[i];
2800			dst[ndst] = src[i];
2801			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
2802			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
2803				     KSYM_NAME_LEN) + 1;
2804		}
2805	}
2806	mod->core_kallsyms.num_symtab = ndst;
2807}
2808#else
2809static inline void layout_symtab(struct module *mod, struct load_info *info)
2810{
2811}
2812
2813static void add_kallsyms(struct module *mod, const struct load_info *info)
2814{
2815}
2816#endif /* CONFIG_KALLSYMS */
2817
2818static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num)
2819{
2820	if (!debug)
2821		return;
2822	ddebug_add_module(debug, num, mod->name);
2823}
2824
2825static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
2826{
2827	if (debug)
2828		ddebug_remove_module(mod->name);
2829}
2830
2831void * __weak module_alloc(unsigned long size)
2832{
2833	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2834			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2835			NUMA_NO_NODE, __builtin_return_address(0));
2836}
2837
2838bool __weak module_init_section(const char *name)
2839{
2840	return strstarts(name, ".init");
2841}
2842
2843bool __weak module_exit_section(const char *name)
2844{
2845	return strstarts(name, ".exit");
2846}
2847
2848#ifdef CONFIG_DEBUG_KMEMLEAK
2849static void kmemleak_load_module(const struct module *mod,
2850				 const struct load_info *info)
2851{
2852	unsigned int i;
2853
2854	/* only scan the sections containing data */
2855	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2856
2857	for (i = 1; i < info->hdr->e_shnum; i++) {
2858		/* Scan all writable sections that are not executable */
2859		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2860		    !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2861		    (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2862			continue;
2863
2864		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2865				   info->sechdrs[i].sh_size, GFP_KERNEL);
2866	}
2867}
2868#else
2869static inline void kmemleak_load_module(const struct module *mod,
2870					const struct load_info *info)
2871{
2872}
2873#endif
2874
2875#ifdef CONFIG_MODULE_SIG
2876static int module_sig_check(struct load_info *info, int flags)
2877{
2878	int err = -ENODATA;
2879	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2880	const char *reason;
2881	const void *mod = info->hdr;
2882
2883	/*
2884	 * Require flags == 0, as a module with version information
2885	 * removed is no longer the module that was signed
2886	 */
2887	if (flags == 0 &&
2888	    info->len > markerlen &&
2889	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2890		/* We truncate the module to discard the signature */
2891		info->len -= markerlen;
2892		err = mod_verify_sig(mod, info);
2893	}
2894
2895	switch (err) {
2896	case 0:
2897		info->sig_ok = true;
2898		return 0;
2899
2900		/* We don't permit modules to be loaded into trusted kernels
2901		 * without a valid signature on them, but if we're not
2902		 * enforcing, certain errors are non-fatal.
2903		 */
2904	case -ENODATA:
2905		reason = "Loading of unsigned module";
2906		goto decide;
2907	case -ENOPKG:
2908		reason = "Loading of module with unsupported crypto";
2909		goto decide;
2910	case -ENOKEY:
2911		reason = "Loading of module with unavailable key";
2912	decide:
2913		if (is_module_sig_enforced()) {
2914			pr_notice("%s: %s is rejected\n", info->name, reason);
2915			return -EKEYREJECTED;
2916		}
2917
2918		return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
2919
2920		/* All other errors are fatal, including nomem, unparseable
2921		 * signatures and signature check failures - even if signatures
2922		 * aren't required.
2923		 */
2924	default:
2925		return err;
2926	}
2927}
2928#else /* !CONFIG_MODULE_SIG */
2929static int module_sig_check(struct load_info *info, int flags)
2930{
2931	return 0;
2932}
2933#endif /* !CONFIG_MODULE_SIG */
2934
2935/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2936static int elf_header_check(struct load_info *info)
2937{
2938	if (info->len < sizeof(*(info->hdr)))
2939		return -ENOEXEC;
2940
2941	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2942	    || info->hdr->e_type != ET_REL
2943	    || !elf_check_arch(info->hdr)
2944	    || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2945		return -ENOEXEC;
2946
2947	if (info->hdr->e_shoff >= info->len
2948	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2949		info->len - info->hdr->e_shoff))
2950		return -ENOEXEC;
2951
2952	return 0;
2953}
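/*
 * Note on the final bounds check above: it is phrased as a subtraction,
 * e_shnum * sizeof(Elf_Shdr) > info->len - e_shoff, rather than adding
 * e_shoff to the table size, so that a huge e_shoff cannot wrap around
 * and slip past the check; e_shoff itself was already verified to lie
 * inside the image.
 */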
2954
2955#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
2956
2957static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
2958{
2959	do {
2960		unsigned long n = min(len, COPY_CHUNK_SIZE);
2961
2962		if (copy_from_user(dst, usrc, n) != 0)
2963			return -EFAULT;
2964		cond_resched();
2965		dst += n;
2966		usrc += n;
2967		len -= n;
2968	} while (len);
2969	return 0;
2970}
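/*
 * Worked example (sizes for illustration only): with COPY_CHUNK_SIZE
 * covering 16 pages, a 40-page module image is copied in chunks of
 * 16 + 16 + 8 pages with a cond_resched() in between, so one large
 * copy_from_user() cannot hog the CPU.
 */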
2971
2972#ifdef CONFIG_LIVEPATCH
2973static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2974{
2975	if (get_modinfo(info, "livepatch")) {
2976		mod->klp = true;
2977		add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
2978		pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
2979			       mod->name);
2980	}
2981
2982	return 0;
2983}
2984#else /* !CONFIG_LIVEPATCH */
2985static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2986{
2987	if (get_modinfo(info, "livepatch")) {
2988		pr_err("%s: module is marked as livepatch module, but livepatch support is disabled\n",
2989		       mod->name);
2990		return -ENOEXEC;
2991	}
2992
2993	return 0;
2994}
2995#endif /* CONFIG_LIVEPATCH */
2996
2997static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
2998{
2999	if (retpoline_module_ok(get_modinfo(info, "retpoline")))
3000		return;
3001
3002	pr_warn("%s: loading module not compiled with retpoline compiler.\n",
3003		mod->name);
3004}
3005
3006/* Sets info->hdr and info->len. */
3007static int copy_module_from_user(const void __user *umod, unsigned long len,
3008				  struct load_info *info)
3009{
3010	int err;
3011
3012	info->len = len;
3013	if (info->len < sizeof(*(info->hdr)))
3014		return -ENOEXEC;
3015
3016	err = security_kernel_load_data(LOADING_MODULE);
3017	if (err)
3018		return err;
3019
3020	/* Suck in entire file: we'll want most of it. */
3021	info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
3022	if (!info->hdr)
3023		return -ENOMEM;
3024
3025	if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
3026		vfree(info->hdr);
3027		return -EFAULT;
3028	}
3029
3030	return 0;
3031}
3032
3033static void free_copy(struct load_info *info)
3034{
3035	vfree(info->hdr);
3036}
3037
3038static int rewrite_section_headers(struct load_info *info, int flags)
3039{
3040	unsigned int i;
3041
3042	/* This should always be true, but let's be sure. */
3043	info->sechdrs[0].sh_addr = 0;
3044
3045	for (i = 1; i < info->hdr->e_shnum; i++) {
3046		Elf_Shdr *shdr = &info->sechdrs[i];
3047		if (shdr->sh_type != SHT_NOBITS
3048		    && info->len < shdr->sh_offset + shdr->sh_size) {
3049			pr_err("Module len %lu truncated\n", info->len);
3050			return -ENOEXEC;
3051		}
3052
3053		/* Point each section's sh_addr at its location in the
3054		   temporary image. */
3055		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
3056
3057#ifndef CONFIG_MODULE_UNLOAD
3058		/* Don't load .exit sections */
3059		if (module_exit_section(info->secstrings+shdr->sh_name))
3060			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
3061#endif
3062	}
3063
3064	/* Track but don't keep modinfo and version sections. */
3065	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
3066	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
3067
3068	return 0;
3069}
3070
3071/*
3072 * Set up our basic convenience variables (pointers to section headers,
3073 * search for module section index etc), and do some basic section
3074 * verification.
3075 *
3076 * Set info->mod to the temporary copy of the module in info->hdr. The final one
3077 * will be allocated in move_module().
3078 */
3079static int setup_load_info(struct load_info *info, int flags)
3080{
3081	unsigned int i;
3082
3083	/* Set up the convenience variables */
3084	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
3085	info->secstrings = (void *)info->hdr
3086		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;
3087
3088	/* Try to find a name early so we can log errors with a module name */
3089	info->index.info = find_sec(info, ".modinfo");
3090	if (info->index.info)
3091		info->name = get_modinfo(info, "name");
3092
3093	/* Find internal symbols and strings. */
3094	for (i = 1; i < info->hdr->e_shnum; i++) {
3095		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
3096			info->index.sym = i;
3097			info->index.str = info->sechdrs[i].sh_link;
3098			info->strtab = (char *)info->hdr
3099				+ info->sechdrs[info->index.str].sh_offset;
3100			break;
3101		}
3102	}
3103
3104	if (info->index.sym == 0) {
3105		pr_warn("%s: module has no symbols (stripped?)\n",
3106			info->name ?: "(missing .modinfo section or name field)");
3107		return -ENOEXEC;
3108	}
3109
3110	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
3111	if (!info->index.mod) {
3112		pr_warn("%s: No module found in object\n",
3113			info->name ?: "(missing .modinfo section or name field)");
3114		return -ENOEXEC;
3115	}
3116	/* This is temporary: point mod into copy of data. */
3117	info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;
3118
3119	/*
3120	 * If we didn't load the .modinfo 'name' field earlier, fall back to
3121	 * on-disk struct mod 'name' field.
3122	 */
3123	if (!info->name)
3124		info->name = info->mod->name;
3125
3126	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
3127		info->index.vers = 0; /* Pretend no __versions section! */
3128	else
3129		info->index.vers = find_sec(info, "__versions");
3130
3131	info->index.pcpu = find_pcpusec(info);
3132
3133	return 0;
3134}
3135
3136static int check_modinfo(struct module *mod, struct load_info *info, int flags)
3137{
3138	const char *modmagic = get_modinfo(info, "vermagic");
3139	int err;
3140
3141	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
3142		modmagic = NULL;
3143
3144	/* This is allowed: modprobe --force will invalidate it. */
3145	if (!modmagic) {
3146		err = try_to_force_load(mod, "bad vermagic");
3147		if (err)
3148			return err;
3149	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
3150		pr_err("%s: version magic '%s' should be '%s'\n",
3151		       info->name, modmagic, vermagic);
3152		return -ENOEXEC;
3153	}
3154
3155	if (!get_modinfo(info, "intree")) {
3156		if (!test_taint(TAINT_OOT_MODULE))
3157			pr_warn("%s: loading out-of-tree module taints kernel.\n",
3158				mod->name);
3159		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
3160	}
3161
3162	check_modinfo_retpoline(mod, info);
3163
3164	if (get_modinfo(info, "staging")) {
3165		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
3166		pr_warn("%s: module is from the staging directory, the quality "
3167			"is unknown, you have been warned.\n", mod->name);
3168	}
3169
3170	err = check_modinfo_livepatch(mod, info);
3171	if (err)
3172		return err;
3173
3174	/* Set up license info based on the info section */
3175	set_license(mod, get_modinfo(info, "license"));
3176
3177	return 0;
3178}
3179
3180static int find_module_sections(struct module *mod, struct load_info *info)
3181{
3182	mod->kp = section_objs(info, "__param",
3183			       sizeof(*mod->kp), &mod->num_kp);
3184	mod->syms = section_objs(info, "__ksymtab",
3185				 sizeof(*mod->syms), &mod->num_syms);
3186	mod->crcs = section_addr(info, "__kcrctab");
3187	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
3188				     sizeof(*mod->gpl_syms),
3189				     &mod->num_gpl_syms);
3190	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
3191	mod->gpl_future_syms = section_objs(info,
3192					    "__ksymtab_gpl_future",
3193					    sizeof(*mod->gpl_future_syms),
3194					    &mod->num_gpl_future_syms);
3195	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
3196
3197#ifdef CONFIG_UNUSED_SYMBOLS
3198	mod->unused_syms = section_objs(info, "__ksymtab_unused",
3199					sizeof(*mod->unused_syms),
3200					&mod->num_unused_syms);
3201	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
3202	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
3203					    sizeof(*mod->unused_gpl_syms),
3204					    &mod->num_unused_gpl_syms);
3205	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
3206#endif
3207#ifdef CONFIG_CONSTRUCTORS
3208	mod->ctors = section_objs(info, ".ctors",
3209				  sizeof(*mod->ctors), &mod->num_ctors);
3210	if (!mod->ctors)
3211		mod->ctors = section_objs(info, ".init_array",
3212				sizeof(*mod->ctors), &mod->num_ctors);
3213	else if (find_sec(info, ".init_array")) {
3214		/*
3215		 * This shouldn't happen with same compiler and binutils
3216		 * building all parts of the module.
3217		 */
3218		pr_warn("%s: has both .ctors and .init_array.\n",
3219		       mod->name);
3220		return -EINVAL;
3221	}
3222#endif
3223
3224	mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1,
3225						&mod->noinstr_text_size);
3226
3227#ifdef CONFIG_TRACEPOINTS
3228	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
3229					     sizeof(*mod->tracepoints_ptrs),
3230					     &mod->num_tracepoints);
3231#endif
3232#ifdef CONFIG_TREE_SRCU
3233	mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
3234					     sizeof(*mod->srcu_struct_ptrs),
3235					     &mod->num_srcu_structs);
3236#endif
3237#ifdef CONFIG_BPF_EVENTS
3238	mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
3239					   sizeof(*mod->bpf_raw_events),
3240					   &mod->num_bpf_raw_events);
3241#endif
3242#ifdef CONFIG_JUMP_LABEL
3243	mod->jump_entries = section_objs(info, "__jump_table",
3244					sizeof(*mod->jump_entries),
3245					&mod->num_jump_entries);
3246#endif
3247#ifdef CONFIG_EVENT_TRACING
3248	mod->trace_events = section_objs(info, "_ftrace_events",
3249					 sizeof(*mod->trace_events),
3250					 &mod->num_trace_events);
3251	mod->trace_evals = section_objs(info, "_ftrace_eval_map",
3252					sizeof(*mod->trace_evals),
3253					&mod->num_trace_evals);
3254#endif
3255#ifdef CONFIG_TRACING
3256	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
3257					 sizeof(*mod->trace_bprintk_fmt_start),
3258					 &mod->num_trace_bprintk_fmt);
3259#endif
3260#ifdef CONFIG_FTRACE_MCOUNT_RECORD
3261	/* sechdrs[0].sh_size is always zero */
3262	mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
3263					     sizeof(*mod->ftrace_callsites),
3264					     &mod->num_ftrace_callsites);
3265#endif
3266#ifdef CONFIG_FUNCTION_ERROR_INJECTION
3267	mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
3268					    sizeof(*mod->ei_funcs),
3269					    &mod->num_ei_funcs);
3270#endif
3271#ifdef CONFIG_KPROBES
3272	mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1,
3273						&mod->kprobes_text_size);
3274	mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
3275						sizeof(unsigned long),
3276						&mod->num_kprobe_blacklist);
3277#endif
3278	mod->extable = section_objs(info, "__ex_table",
3279				    sizeof(*mod->extable), &mod->num_exentries);
3280
3281	if (section_addr(info, "__obsparm"))
3282		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
3283
3284	info->debug = section_objs(info, "__dyndbg",
3285				   sizeof(*info->debug), &info->num_debug);
3286
3287	return 0;
3288}
3289
3290static int move_module(struct module *mod, struct load_info *info)
3291{
3292	int i;
3293	void *ptr;
3294
3295	/* Do the allocs. */
3296	ptr = module_alloc(mod->core_layout.size);
3297	/*
3298	 * The pointer to this block is stored in the module structure
3299	 * which is inside the block. Just mark it as not being a
3300	 * leak.
3301	 */
3302	kmemleak_not_leak(ptr);
3303	if (!ptr)
3304		return -ENOMEM;
3305
3306	memset(ptr, 0, mod->core_layout.size);
3307	mod->core_layout.base = ptr;
3308
3309	if (mod->init_layout.size) {
3310		ptr = module_alloc(mod->init_layout.size);
3311		/*
3312		 * The pointer to this block is stored in the module structure
3313		 * which is inside the block. This block doesn't need to be
3314		 * scanned as it contains data and code that will be freed
3315		 * after the module is initialized.
3316		 */
3317		kmemleak_ignore(ptr);
3318		if (!ptr) {
3319			module_memfree(mod->core_layout.base);
3320			return -ENOMEM;
3321		}
3322		memset(ptr, 0, mod->init_layout.size);
3323		mod->init_layout.base = ptr;
3324	} else
3325		mod->init_layout.base = NULL;
3326
3327	/* Transfer each section which specifies SHF_ALLOC */
3328	pr_debug("final section addresses:\n");
3329	for (i = 0; i < info->hdr->e_shnum; i++) {
3330		void *dest;
3331		Elf_Shdr *shdr = &info->sechdrs[i];
3332
3333		if (!(shdr->sh_flags & SHF_ALLOC))
3334			continue;
3335
3336		if (shdr->sh_entsize & INIT_OFFSET_MASK)
3337			dest = mod->init_layout.base
3338				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
3339		else
3340			dest = mod->core_layout.base + shdr->sh_entsize;
3341
3342		if (shdr->sh_type != SHT_NOBITS)
3343			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
3344		/* Update sh_addr to point to copy in image. */
3345		shdr->sh_addr = (unsigned long)dest;
3346		pr_debug("\t0x%lx %s\n",
3347			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
3348	}
3349
3350	return 0;
3351}
3352
3353static int check_module_license_and_versions(struct module *mod)
3354{
3355	int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
3356
3357	/*
3358	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
3359	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
3360	 * using GPL-only symbols it needs.
3361	 */
3362	if (strcmp(mod->name, "ndiswrapper") == 0)
3363		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
3364
3365	/* driverloader was caught wrongly pretending to be under GPL */
3366	if (strcmp(mod->name, "driverloader") == 0)
3367		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3368				 LOCKDEP_NOW_UNRELIABLE);
3369
3370	/* lve claims to be GPL but upstream won't provide source */
3371	if (strcmp(mod->name, "lve") == 0)
3372		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3373				 LOCKDEP_NOW_UNRELIABLE);
3374
3375	if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
3376		pr_warn("%s: module license taints kernel.\n", mod->name);
3377
3378#ifdef CONFIG_MODVERSIONS
3379	if ((mod->num_syms && !mod->crcs)
3380	    || (mod->num_gpl_syms && !mod->gpl_crcs)
3381	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
3382#ifdef CONFIG_UNUSED_SYMBOLS
3383	    || (mod->num_unused_syms && !mod->unused_crcs)
3384	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
3385#endif
3386		) {
3387		return try_to_force_load(mod,
3388					 "no versions for exported symbols");
3389	}
3390#endif
3391	return 0;
3392}
3393
3394static void flush_module_icache(const struct module *mod)
3395{
3396	/*
3397	 * Flush the instruction cache, since we've played with text.
3398	 * Do it before processing of module parameters, so the module
3399	 * can provide parameter accessor functions of its own.
3400	 */
3401	if (mod->init_layout.base)
3402		flush_icache_range((unsigned long)mod->init_layout.base,
3403				   (unsigned long)mod->init_layout.base
3404				   + mod->init_layout.size);
3405	flush_icache_range((unsigned long)mod->core_layout.base,
3406			   (unsigned long)mod->core_layout.base + mod->core_layout.size);
3407}
3408
3409int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
3410				     Elf_Shdr *sechdrs,
3411				     char *secstrings,
3412				     struct module *mod)
3413{
3414	return 0;
3415}
3416
3417/* module_blacklist is a comma-separated list of module names */
3418static char *module_blacklist;
3419static bool blacklisted(const char *module_name)
3420{
3421	const char *p;
3422	size_t len;
3423
3424	if (!module_blacklist)
3425		return false;
3426
3427	for (p = module_blacklist; *p; p += len) {
3428		len = strcspn(p, ",");
3429		if (strlen(module_name) == len && !memcmp(module_name, p, len))
3430			return true;
3431		if (p[len] == ',')
3432			len++;
3433	}
3434	return false;
3435}
3436core_param(module_blacklist, module_blacklist, charp, 0400);
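/*
 * Usage example (module names are hypothetical): booting with
 *
 *   module_blacklist=nouveau,lustre
 *
 * makes blacklisted("nouveau") and blacklisted("lustre") return true,
 * so load_module() refuses those modules with -EPERM before any
 * allocation takes place.
 */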
3437
3438static struct module *layout_and_allocate(struct load_info *info, int flags)
3439{
3440	struct module *mod;
3441	unsigned int ndx;
3442	int err;
3443
3444	err = check_modinfo(info->mod, info, flags);
3445	if (err)
3446		return ERR_PTR(err);
3447
3448	/* Allow arches to frob section contents and sizes.  */
3449	err = module_frob_arch_sections(info->hdr, info->sechdrs,
3450					info->secstrings, info->mod);
3451	if (err < 0)
3452		return ERR_PTR(err);
3453
3454	err = module_enforce_rwx_sections(info->hdr, info->sechdrs,
3455					  info->secstrings, info->mod);
3456	if (err < 0)
3457		return ERR_PTR(err);
3458
3459	/* We will do a special allocation for per-cpu sections later. */
3460	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
3461
3462	/*
3463	 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
3464	 * layout_sections() can put it in the right place.
3465	 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
3466	 */
3467	ndx = find_sec(info, ".data..ro_after_init");
3468	if (ndx)
3469		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3470	/*
3471	 * Mark the __jump_table section as ro_after_init as well: these data
3472	 * structures are never modified, with the exception of entries that
3473	 * refer to code in the __init section, which are annotated as such
3474	 * at module load time.
3475	 */
3476	ndx = find_sec(info, "__jump_table");
3477	if (ndx)
3478		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3479
3480	/* Determine total sizes, and put offsets in sh_entsize.  For now
3481	   this is done generically; there don't appear to be any
3482	   special cases for the architectures. */
3483	layout_sections(info->mod, info);
3484	layout_symtab(info->mod, info);
3485
3486	/* Allocate and move to the final place */
3487	err = move_module(info->mod, info);
3488	if (err)
3489		return ERR_PTR(err);
3490
3491	/* Module has been copied to its final place now: return it. */
3492	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
3493	kmemleak_load_module(mod, info);
3494	return mod;
3495}
3496
3497/* mod is no longer valid after this! */
3498static void module_deallocate(struct module *mod, struct load_info *info)
3499{
3500	percpu_modfree(mod);
3501	module_arch_freeing_init(mod);
3502	module_memfree(mod->init_layout.base);
3503	module_memfree(mod->core_layout.base);
3504}
3505
3506int __weak module_finalize(const Elf_Ehdr *hdr,
3507			   const Elf_Shdr *sechdrs,
3508			   struct module *me)
3509{
3510	return 0;
3511}
3512
3513static int post_relocation(struct module *mod, const struct load_info *info)
3514{
3515	/* Sort exception table now relocations are done. */
3516	sort_extable(mod->extable, mod->extable + mod->num_exentries);
3517
3518	/* Copy relocated percpu area over. */
3519	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
3520		       info->sechdrs[info->index.pcpu].sh_size);
3521
3522	/* Setup kallsyms-specific fields. */
3523	add_kallsyms(mod, info);
3524
3525	/* Arch-specific module finalizing. */
3526	return module_finalize(info->hdr, info->sechdrs, mod);
3527}
3528
3529/* Is this module of this name done loading?  No locks held. */
3530static bool finished_loading(const char *name)
3531{
3532	struct module *mod;
3533	bool ret;
3534
3535	/*
3536	 * The module_mutex should not be a heavily contended lock;
3537	 * if we get the occasional sleep here, we'll go an extra iteration
3538	 * in the wait_event_interruptible(), which is harmless.
3539	 */
3540	sched_annotate_sleep();
3541	mutex_lock(&module_mutex);
3542	mod = find_module_all(name, strlen(name), true);
3543	ret = !mod || mod->state == MODULE_STATE_LIVE;
3544	mutex_unlock(&module_mutex);
3545
3546	return ret;
3547}
3548
3549/* Call module constructors. */
3550static void do_mod_ctors(struct module *mod)
3551{
3552#ifdef CONFIG_CONSTRUCTORS
3553	unsigned long i;
3554
3555	for (i = 0; i < mod->num_ctors; i++)
3556		mod->ctors[i]();
3557#endif
3558}
3559
3560/* For freeing module_init on success, in case kallsyms is still traversing it */
3561struct mod_initfree {
3562	struct llist_node node;
3563	void *module_init;
3564};
3565
3566static void do_free_init(struct work_struct *w)
3567{
3568	struct llist_node *pos, *n, *list;
3569	struct mod_initfree *initfree;
3570
3571	list = llist_del_all(&init_free_list);
3572
3573	synchronize_rcu();
3574
3575	llist_for_each_safe(pos, n, list) {
3576		initfree = container_of(pos, struct mod_initfree, node);
3577		module_memfree(initfree->module_init);
3578		kfree(initfree);
3579	}
3580}
3581
3582static int __init modules_wq_init(void)
3583{
3584	INIT_WORK(&init_free_wq, do_free_init);
3585	init_llist_head(&init_free_list);
3586	return 0;
3587}
3588module_init(modules_wq_init);
3589
3590/*
3591 * This is where the real work happens.
3592 *
3593 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
3594 * helper command 'lx-symbols'.
3595 */
3596static noinline int do_init_module(struct module *mod)
3597{
3598	int ret = 0;
3599	struct mod_initfree *freeinit;
3600
3601	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
3602	if (!freeinit) {
3603		ret = -ENOMEM;
3604		goto fail;
3605	}
3606	freeinit->module_init = mod->init_layout.base;
3607
3608	/*
3609	 * We want to find out whether @mod uses async during init.  Clear
3610	 * PF_USED_ASYNC.  async_schedule*() will set it.
3611	 */
3612	current->flags &= ~PF_USED_ASYNC;
3613
3614	do_mod_ctors(mod);
3615	/* Start the module */
3616	if (mod->init != NULL)
3617		ret = do_one_initcall(mod->init);
3618	if (ret < 0) {
3619		goto fail_free_freeinit;
3620	}
3621	if (ret > 0) {
3622		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3623			"follow 0/-E convention\n"
3624			"%s: loading module anyway...\n",
3625			__func__, mod->name, ret, __func__);
3626		dump_stack();
3627	}
3628
3629	/* Now it's a first class citizen! */
3630	mod->state = MODULE_STATE_LIVE;
3631	blocking_notifier_call_chain(&module_notify_list,
3632				     MODULE_STATE_LIVE, mod);
3633
3634	/*
3635	 * We need to finish all async code before the module init sequence
3636	 * is done.  This has potential to deadlock.  For example, a newly
3637	 * detected block device can trigger request_module() of the
3638	 * default iosched from async probing task.  Once userland helper
3639	 * reaches here, async_synchronize_full() will wait on the async
3640	 * task waiting on request_module() and deadlock.
3641	 *
3642	 * This deadlock is avoided by performing async_synchronize_full()
3643	 * iff module init queued any async jobs.  This isn't a full
3644	 * solution as it will deadlock the same if module loading from
3645	 * async jobs nests more than once; however, due to the various
3646	 * constraints, this hack seems to be the best option for now.
3647	 * Please refer to the following thread for details.
3648	 *
3649	 * http://thread.gmane.org/gmane.linux.kernel/1420814
3650	 */
3651	if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
3652		async_synchronize_full();
3653
3654	ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
3655			mod->init_layout.size);
3656	mutex_lock(&module_mutex);
3657	/* Drop initial reference. */
3658	module_put(mod);
3659	trim_init_extable(mod);
3660#ifdef CONFIG_KALLSYMS
3661	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
3662	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
3663#endif
3664	module_enable_ro(mod, true);
3665	mod_tree_remove_init(mod);
3666	module_arch_freeing_init(mod);
3667	mod->init_layout.base = NULL;
3668	mod->init_layout.size = 0;
3669	mod->init_layout.ro_size = 0;
3670	mod->init_layout.ro_after_init_size = 0;
3671	mod->init_layout.text_size = 0;
3672	/*
3673	 * We want to free module_init, but be aware that kallsyms may be
3674	 * walking this with preempt disabled.  In all the failure paths, we
3675	 * call synchronize_rcu(), but we don't want to slow down the success
3676	 * path. module_memfree() cannot be called in an interrupt, so do the
3677	 * work and call synchronize_rcu() in a work queue.
3678	 *
3679	 * Note that module_alloc() on most architectures creates W+X page
3680	 * mappings which won't be cleaned up until do_free_init() runs.  Any
3681	 * code such as mark_rodata_ro() which depends on those mappings to
3682	 * be cleaned up needs to sync with the queued work - ie
3683	 * be cleaned up needs to sync with the queued work, i.e. call
3684	 * rcu_barrier().
3685	if (llist_add(&freeinit->node, &init_free_list))
3686		schedule_work(&init_free_wq);
3687
3688	mutex_unlock(&module_mutex);
3689	wake_up_all(&module_wq);
3690
3691	return 0;
3692
3693fail_free_freeinit:
3694	kfree(freeinit);
3695fail:
3696	/* Try to protect us from buggy refcounters. */
3697	mod->state = MODULE_STATE_GOING;
3698	synchronize_rcu();
3699	module_put(mod);
3700	blocking_notifier_call_chain(&module_notify_list,
3701				     MODULE_STATE_GOING, mod);
3702	klp_module_going(mod);
3703	ftrace_release_mod(mod);
3704	free_module(mod);
3705	wake_up_all(&module_wq);
3706	return ret;
3707}
3708
3709static int may_init_module(void)
3710{
3711	if (!capable(CAP_SYS_MODULE) || modules_disabled)
3712		return -EPERM;
3713
3714	return 0;
3715}
3716
3717/*
3718 * We try to place it in the list now to make sure it's unique before
3719 * we dedicate too many resources.  In particular, this avoids
3720 * temporary percpu memory exhaustion.
3721 */
3722static int add_unformed_module(struct module *mod)
3723{
3724	int err;
3725	struct module *old;
3726
3727	mod->state = MODULE_STATE_UNFORMED;
3728
3729again:
3730	mutex_lock(&module_mutex);
3731	old = find_module_all(mod->name, strlen(mod->name), true);
3732	if (old != NULL) {
3733		if (old->state != MODULE_STATE_LIVE) {
3734			/* Wait in case it fails to load. */
3735			mutex_unlock(&module_mutex);
3736			err = wait_event_interruptible(module_wq,
3737					       finished_loading(mod->name));
3738			if (err)
3739				goto out_unlocked;
3740			goto again;
3741		}
3742		err = -EEXIST;
3743		goto out;
3744	}
3745	mod_update_bounds(mod);
3746	list_add_rcu(&mod->list, &modules);
3747	mod_tree_insert(mod);
3748	err = 0;
3749
3750out:
3751	mutex_unlock(&module_mutex);
3752out_unlocked:
3753	return err;
3754}
3755
3756static int complete_formation(struct module *mod, struct load_info *info)
3757{
3758	int err;
3759
3760	mutex_lock(&module_mutex);
3761
3762	/* Find duplicate symbols (must be called under lock). */
3763	err = verify_exported_symbols(mod);
3764	if (err < 0)
3765		goto out;
3766
3767	/* This relies on module_mutex for list integrity. */
3768	module_bug_finalize(info->hdr, info->sechdrs, mod);
3769
3770	module_enable_ro(mod, false);
3771	module_enable_nx(mod);
3772	module_enable_x(mod);
3773
3774	/* Mark state as coming so strong_try_module_get() ignores us,
3775	 * but kallsyms etc. can see us. */
3776	mod->state = MODULE_STATE_COMING;
3777	mutex_unlock(&module_mutex);
3778
3779	return 0;
3780
3781out:
3782	mutex_unlock(&module_mutex);
3783	return err;
3784}
3785
3786static int prepare_coming_module(struct module *mod)
3787{
3788	int err;
3789
3790	ftrace_module_enable(mod);
3791	err = klp_module_coming(mod);
3792	if (err)
3793		return err;
3794
3795	blocking_notifier_call_chain(&module_notify_list,
3796				     MODULE_STATE_COMING, mod);
3797	return 0;
3798}
3799
3800static int unknown_module_param_cb(char *param, char *val, const char *modname,
3801				   void *arg)
3802{
3803	struct module *mod = arg;
3804	int ret;
3805
3806	if (strcmp(param, "async_probe") == 0) {
3807		mod->async_probe_requested = true;
3808		return 0;
3809	}
3810
3811	/* Check for magic 'dyndbg' arg */
3812	ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3813	if (ret != 0)
3814		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3815	return 0;
3816}
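/*
 * Example (hypothetical module): "insmod foo.ko async_probe" lands here
 * because "async_probe" matches no parameter declared by foo; the module
 * is marked async_probe_requested, and do_init_module() then skips the
 * async_synchronize_full() wait for it.
 */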
3817
3818/* Allocate and load the module: note that size of section 0 is always
3819   zero, and we rely on this for optional sections. */
3820static int load_module(struct load_info *info, const char __user *uargs,
3821		       int flags)
3822{
3823	struct module *mod;
3824	long err = 0;
3825	char *after_dashes;
3826
3827	err = elf_header_check(info);
3828	if (err)
3829		goto free_copy;
3830
3831	err = setup_load_info(info, flags);
3832	if (err)
3833		goto free_copy;
3834
3835	if (blacklisted(info->name)) {
3836		err = -EPERM;
3837		goto free_copy;
3838	}
3839
3840	err = module_sig_check(info, flags);
3841	if (err)
3842		goto free_copy;
3843
3844	err = rewrite_section_headers(info, flags);
3845	if (err)
3846		goto free_copy;
3847
3848	/* Check module struct version now, before we try to use module. */
3849	if (!check_modstruct_version(info, info->mod)) {
3850		err = -ENOEXEC;
3851		goto free_copy;
3852	}
3853
3854	/* Figure out module layout, and allocate all the memory. */
3855	mod = layout_and_allocate(info, flags);
3856	if (IS_ERR(mod)) {
3857		err = PTR_ERR(mod);
3858		goto free_copy;
3859	}
3860
3861	audit_log_kern_module(mod->name);
3862
3863	/* Reserve our place in the list. */
3864	err = add_unformed_module(mod);
3865	if (err)
3866		goto free_module;
3867
3868#ifdef CONFIG_MODULE_SIG
3869	mod->sig_ok = info->sig_ok;
3870	if (!mod->sig_ok) {
3871		pr_notice_once("%s: module verification failed: signature "
3872			       "and/or required key missing - tainting "
3873			       "kernel\n", mod->name);
3874		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
3875	}
3876#endif
3877
3878	/* To avoid stressing percpu allocator, do this once we're unique. */
3879	err = percpu_modalloc(mod, info);
3880	if (err)
3881		goto unlink_mod;
3882
3883	/* Now module is in final location, initialize linked lists, etc. */
3884	err = module_unload_init(mod);
3885	if (err)
3886		goto unlink_mod;
3887
3888	init_param_lock(mod);
3889
3890	/* Now that we've got everything in the final locations, we can
3891	 * find optional sections. */
3892	err = find_module_sections(mod, info);
3893	if (err)
3894		goto free_unload;
3895
3896	err = check_module_license_and_versions(mod);
3897	if (err)
3898		goto free_unload;
3899
3900	/* Set up MODINFO_ATTR fields */
3901	setup_modinfo(mod, info);
3902
3903	/* Fix up syms, so that st_value is a pointer to location. */
3904	err = simplify_symbols(mod, info);
3905	if (err < 0)
3906		goto free_modinfo;
3907
3908	err = apply_relocations(mod, info);
3909	if (err < 0)
3910		goto free_modinfo;
3911
3912	err = post_relocation(mod, info);
3913	if (err < 0)
3914		goto free_modinfo;
3915
3916	flush_module_icache(mod);
3917
3918	/* Now copy in args */
3919	mod->args = strndup_user(uargs, ~0UL >> 1);
3920	if (IS_ERR(mod->args)) {
3921		err = PTR_ERR(mod->args);
3922		goto free_arch_cleanup;
3923	}
3924
3925	dynamic_debug_setup(mod, info->debug, info->num_debug);
3926
3927	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3928	ftrace_module_init(mod);
3929
3930	/* Finally it's fully formed, ready to start executing. */
3931	err = complete_formation(mod, info);
3932	if (err)
3933		goto ddebug_cleanup;
3934
3935	err = prepare_coming_module(mod);
3936	if (err)
3937		goto bug_cleanup;
3938
3939	/* Module is ready to execute: parsing args may do that. */
3940	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3941				  -32768, 32767, mod,
3942				  unknown_module_param_cb);
3943	if (IS_ERR(after_dashes)) {
3944		err = PTR_ERR(after_dashes);
3945		goto coming_cleanup;
3946	} else if (after_dashes) {
3947		pr_warn("%s: parameters '%s' after `--' ignored\n",
3948		       mod->name, after_dashes);
3949	}
3950
3951	/* Link in to sysfs. */
3952	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3953	if (err < 0)
3954		goto coming_cleanup;
3955
3956	if (is_livepatch_module(mod)) {
3957		err = copy_module_elf(mod, info);
3958		if (err < 0)
3959			goto sysfs_cleanup;
3960	}
3961
3962	/* Get rid of temporary copy. */
3963	free_copy(info);
3964
3965	/* Done! */
3966	trace_module_load(mod);
3967
3968	return do_init_module(mod);
3969
3970 sysfs_cleanup:
3971	mod_sysfs_teardown(mod);
3972 coming_cleanup:
3973	mod->state = MODULE_STATE_GOING;
3974	destroy_params(mod->kp, mod->num_kp);
3975	blocking_notifier_call_chain(&module_notify_list,
3976				     MODULE_STATE_GOING, mod);
3977	klp_module_going(mod);
3978 bug_cleanup:
3979	/* module_bug_cleanup needs module_mutex protection */
3980	mutex_lock(&module_mutex);
3981	module_bug_cleanup(mod);
3982	mutex_unlock(&module_mutex);
3983
3984 ddebug_cleanup:
3985	ftrace_release_mod(mod);
3986	dynamic_debug_remove(mod, info->debug);
3987	synchronize_rcu();
3988	kfree(mod->args);
3989 free_arch_cleanup:
3990	module_arch_cleanup(mod);
3991 free_modinfo:
3992	free_modinfo(mod);
3993 free_unload:
3994	module_unload_free(mod);
3995 unlink_mod:
3996	mutex_lock(&module_mutex);
3997	/* Unlink carefully: kallsyms could be walking list. */
3998	list_del_rcu(&mod->list);
3999	mod_tree_remove(mod);
4000	wake_up_all(&module_wq);
4001	/* Wait for RCU synchronization to finish before releasing mod->list. */
4002	synchronize_rcu();
4003	mutex_unlock(&module_mutex);
4004 free_module:
4005	/* Free lock-classes; relies on the preceding synchronize_rcu(). */
4006	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
4007
4008	module_deallocate(mod, info);
4009 free_copy:
4010	free_copy(info);
4011	return err;
4012}
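/*
 * Note on the error ladder above (illustrative): each label undoes only
 * the work done since the previous successful step, in reverse order,
 * so a failure at any point releases exactly what was acquired so far.
 */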
4013
4014SYSCALL_DEFINE3(init_module, void __user *, umod,
4015		unsigned long, len, const char __user *, uargs)
4016{
4017	int err;
4018	struct load_info info = { };
4019
4020	err = may_init_module();
4021	if (err)
4022		return err;
4023
4024	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
4025	       umod, len, uargs);
4026
4027	err = copy_module_from_user(umod, len, &info);
4028	if (err)
4029		return err;
4030
4031	return load_module(&info, uargs, 0);
4032}
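/*
 * Userspace usage sketch (illustrative, not kernel code; error handling
 * omitted):
 *
 *	void *image = ...;		// whole .ko file read into memory
 *	unsigned long len = ...;	// its size in bytes
 *	syscall(__NR_init_module, image, len, "param=value");
 */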
4033
4034SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
4035{
4036	struct load_info info = { };
4037	loff_t size;
4038	void *hdr;
4039	int err;
4040
4041	err = may_init_module();
4042	if (err)
4043		return err;
4044
4045	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
4046
4047	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
4048		      |MODULE_INIT_IGNORE_VERMAGIC))
4049		return -EINVAL;
4050
4051	err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
4052				       READING_MODULE);
4053	if (err)
4054		return err;
4055	info.hdr = hdr;
4056	info.len = size;
4057
4058	return load_module(&info, uargs, flags);
4059}
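/*
 * Userspace usage sketch (illustrative): finit_module() takes an open
 * file descriptor instead of an in-memory image:
 *
 *	int fd = open("/path/to/mymod.ko", O_RDONLY);
 *	syscall(__NR_finit_module, fd, "", 0);
 *
 * As checked above, the only accepted flags are
 * MODULE_INIT_IGNORE_MODVERSIONS and MODULE_INIT_IGNORE_VERMAGIC.
 */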
4060
4061static inline int within(unsigned long addr, void *start, unsigned long size)
4062{
4063	return ((void *)addr >= start && (void *)addr < start + size);
4064}
4065
4066#ifdef CONFIG_KALLSYMS
4067/*
4068 * This ignores the intensely annoying "mapping symbols" found
4069 * in ARM/ARM64 ELF files ($a, $x, $t and $d) and ".L" local labels.
4070 */
4071static inline int is_arm_mapping_symbol(const char *str)
4072{
4073	if (str[0] == '.' && str[1] == 'L')
4074		return true;
4075	return str[0] == '$' && strchr("axtd", str[1])
4076	       && (str[2] == '\0' || str[2] == '.');
4077}
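/*
 * Examples (illustrative): "$a", "$d", "$x.123" and ".Lanon" are all
 * treated as mapping/local symbols; "$foo" and "a$t" are not.
 */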
4078
4079static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum)
4080{
4081	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
4082}
4083
4084/*
4085 * Given a module and address, find the corresponding symbol and return its name
4086 * while providing its size and offset if needed.
4087 */
4088static const char *find_kallsyms_symbol(struct module *mod,
4089					unsigned long addr,
4090					unsigned long *size,
4091					unsigned long *offset)
4092{
4093	unsigned int i, best = 0;
4094	unsigned long nextval, bestval;
4095	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
4096
4097	/* At worst, next value is at end of module */
4098	if (within_module_init(addr, mod))
4099		nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
4100	else
4101		nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
4102
4103	bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);
4104
4105	/* Scan for closest preceding symbol, and next symbol. (ELF
4106	   starts real symbols at 1). */
4107	for (i = 1; i < kallsyms->num_symtab; i++) {
4108		const Elf_Sym *sym = &kallsyms->symtab[i];
4109		unsigned long thisval = kallsyms_symbol_value(sym);
4110
4111		if (sym->st_shndx == SHN_UNDEF)
4112			continue;
4113
4114		/* We ignore unnamed symbols: they're uninformative
4115		 * and inserted at a whim. */
4116		if (*kallsyms_symbol_name(kallsyms, i) == '\0'
4117		    || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
4118			continue;
4119
4120		if (thisval <= addr && thisval > bestval) {
4121			best = i;
4122			bestval = thisval;
4123		}
4124		if (thisval > addr && thisval < nextval)
4125			nextval = thisval;
4126	}
4127
4128	if (!best)
4129		return NULL;
4130
4131	if (size)
4132		*size = nextval - bestval;
4133	if (offset)
4134		*offset = addr - bestval;
4135
4136	return kallsyms_symbol_name(kallsyms, best);
4137}
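/*
 * Note (illustrative): this is a simple linear scan of the module's
 * symbol table; it runs in oops/backtrace paths where robustness and
 * simplicity matter more than speed.
 */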
4138
4139void * __weak dereference_module_function_descriptor(struct module *mod,
4140						     void *ptr)
4141{
4142	return ptr;
4143}
4144
4145/* For kallsyms to ask for address resolution.  NULL means not found.  Careful
4146 * not to lock, which could deadlock on an oops; simply disable preemption. */
4147const char *module_address_lookup(unsigned long addr,
4148			    unsigned long *size,
4149			    unsigned long *offset,
4150			    char **modname,
4151			    char *namebuf)
4152{
4153	const char *ret = NULL;
4154	struct module *mod;
4155
4156	preempt_disable();
4157	mod = __module_address(addr);
4158	if (mod) {
4159		if (modname)
4160			*modname = mod->name;
4161
4162		ret = find_kallsyms_symbol(mod, addr, size, offset);
4163	}
4164	/* Make a copy in here where it's safe */
4165	if (ret) {
4166		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
4167		ret = namebuf;
4168	}
4169	preempt_enable();
4170
4171	return ret;
4172}
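/*
 * Illustrative: this lookup is what lets printk's %pS specifier render
 * a module address as "function+0x10/0x30 [module]" in backtraces.
 */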
4173
4174int lookup_module_symbol_name(unsigned long addr, char *symname)
4175{
4176	struct module *mod;
4177
4178	preempt_disable();
4179	list_for_each_entry_rcu(mod, &modules, list) {
4180		if (mod->state == MODULE_STATE_UNFORMED)
4181			continue;
4182		if (within_module(addr, mod)) {
4183			const char *sym;
4184
4185			sym = find_kallsyms_symbol(mod, addr, NULL, NULL);
4186			if (!sym)
4187				goto out;
4188
4189			strlcpy(symname, sym, KSYM_NAME_LEN);
4190			preempt_enable();
4191			return 0;
4192		}
4193	}
4194out:
4195	preempt_enable();
4196	return -ERANGE;
4197}
4198
4199int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
4200			unsigned long *offset, char *modname, char *name)
4201{
4202	struct module *mod;
4203
4204	preempt_disable();
4205	list_for_each_entry_rcu(mod, &modules, list) {
4206		if (mod->state == MODULE_STATE_UNFORMED)
4207			continue;
4208		if (within_module(addr, mod)) {
4209			const char *sym;
4210
4211			sym = find_kallsyms_symbol(mod, addr, size, offset);
4212			if (!sym)
4213				goto out;
4214			if (modname)
4215				strlcpy(modname, mod->name, MODULE_NAME_LEN);
4216			if (name)
4217				strlcpy(name, sym, KSYM_NAME_LEN);
4218			preempt_enable();
4219			return 0;
4220		}
4221	}
4222out:
4223	preempt_enable();
4224	return -ERANGE;
4225}
4226
4227int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
4228			char *name, char *module_name, int *exported)
4229{
4230	struct module *mod;
4231
4232	preempt_disable();
4233	list_for_each_entry_rcu(mod, &modules, list) {
4234		struct mod_kallsyms *kallsyms;
4235
4236		if (mod->state == MODULE_STATE_UNFORMED)
4237			continue;
4238		kallsyms = rcu_dereference_sched(mod->kallsyms);
4239		if (symnum < kallsyms->num_symtab) {
4240			const Elf_Sym *sym = &kallsyms->symtab[symnum];
4241
4242			*value = kallsyms_symbol_value(sym);
4243			*type = kallsyms->typetab[symnum];
4244			strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
4245			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
4246			*exported = is_exported(name, *value, mod);
4247			preempt_enable();
4248			return 0;
4249		}
4250		symnum -= kallsyms->num_symtab;
4251	}
4252	preempt_enable();
4253	return -ERANGE;
4254}
4255
4256/* Given a module and name of symbol, find and return the symbol's value */
4257static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
4258{
4259	unsigned int i;
4260	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
4261
4262	for (i = 0; i < kallsyms->num_symtab; i++) {
4263		const Elf_Sym *sym = &kallsyms->symtab[i];
4264
4265		if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 &&
4266		    sym->st_shndx != SHN_UNDEF)
4267			return kallsyms_symbol_value(sym);
4268	}
4269	return 0;
4270}
4271
4272/* Look for this name: can be of form module:name. */
4273unsigned long module_kallsyms_lookup_name(const char *name)
4274{
4275	struct module *mod;
4276	char *colon;
4277	unsigned long ret = 0;
4278
4279	/* Don't lock: we're in enough trouble already. */
4280	preempt_disable();
4281	if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
4282		if ((mod = find_module_all(name, colon - name, false)) != NULL)
4283			ret = find_kallsyms_symbol_value(mod, colon+1);
4284	} else {
4285		list_for_each_entry_rcu(mod, &modules, list) {
4286			if (mod->state == MODULE_STATE_UNFORMED)
4287				continue;
4288			if ((ret = find_kallsyms_symbol_value(mod, name)) != 0)
4289				break;
4290		}
4291	}
4292	preempt_enable();
4293	return ret;
4294}
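/*
 * Examples (illustrative):
 *	module_kallsyms_lookup_name("usbcore:usb_register_driver");
 *	module_kallsyms_lookup_name("usb_register_driver");
 * The first form searches only the "usbcore" module; the second scans
 * every live module until a match is found.
 */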
4295
4296int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
4297					     struct module *, unsigned long),
4298				   void *data)
4299{
4300	struct module *mod;
4301	unsigned int i;
4302	int ret;
4303
4304	module_assert_mutex();
4305
4306	list_for_each_entry(mod, &modules, list) {
4307		/* We hold module_mutex: no need for rcu_dereference_sched */
4308		struct mod_kallsyms *kallsyms = mod->kallsyms;
4309
4310		if (mod->state == MODULE_STATE_UNFORMED)
4311			continue;
4312		for (i = 0; i < kallsyms->num_symtab; i++) {
4313			const Elf_Sym *sym = &kallsyms->symtab[i];
4314
4315			if (sym->st_shndx == SHN_UNDEF)
4316				continue;
4317
4318			ret = fn(data, kallsyms_symbol_name(kallsyms, i),
4319				 mod, kallsyms_symbol_value(sym));
4320			if (ret != 0)
4321				return ret;
4322		}
4323	}
4324	return 0;
4325}
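/*
 * Example callback sketch (illustrative, not in the original source):
 *
 *	static int count_module_syms(void *data, const char *name,
 *				     struct module *mod, unsigned long addr)
 *	{
 *		(*(unsigned long *)data)++;
 *		return 0;	// any non-zero return stops the walk
 *	}
 *
 * Callers must hold module_mutex, as asserted above.
 */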
4326#endif /* CONFIG_KALLSYMS */
4327
4328/* Maximum number of characters written by module_flags() */
4329#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
4330
4331/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
4332static char *module_flags(struct module *mod, char *buf)
4333{
4334	int bx = 0;
4335
4336	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
4337	if (mod->taints ||
4338	    mod->state == MODULE_STATE_GOING ||
4339	    mod->state == MODULE_STATE_COMING) {
4340		buf[bx++] = '(';
4341		bx += module_flags_taint(mod, buf + bx);
4342		/* Show a - for module-is-being-unloaded */
4343		if (mod->state == MODULE_STATE_GOING)
4344			buf[bx++] = '-';
4345		/* Show a + for module-is-being-loaded */
4346		if (mod->state == MODULE_STATE_COMING)
4347			buf[bx++] = '+';
4348		buf[bx++] = ')';
4349	}
4350	buf[bx] = '\0';
4351
4352	return buf;
4353}
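/*
 * Example (illustrative): an out-of-tree, unsigned module that is still
 * loading renders as "(OE+)"; a live, untainted module yields "".
 */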
4354
4355#ifdef CONFIG_PROC_FS
4356/* Called by the /proc file system to return a list of modules. */
4357static void *m_start(struct seq_file *m, loff_t *pos)
4358{
4359	mutex_lock(&module_mutex);
4360	return seq_list_start(&modules, *pos);
4361}
4362
4363static void *m_next(struct seq_file *m, void *p, loff_t *pos)
4364{
4365	return seq_list_next(p, &modules, pos);
4366}
4367
4368static void m_stop(struct seq_file *m, void *p)
4369{
4370	mutex_unlock(&module_mutex);
4371}
4372
4373static int m_show(struct seq_file *m, void *p)
4374{
4375	struct module *mod = list_entry(p, struct module, list);
4376	char buf[MODULE_FLAGS_BUF_SIZE];
4377	void *value;
4378
4379	/* We always ignore unformed modules. */
4380	if (mod->state == MODULE_STATE_UNFORMED)
4381		return 0;
4382
4383	seq_printf(m, "%s %u",
4384		   mod->name, mod->init_layout.size + mod->core_layout.size);
4385	print_unload_info(m, mod);
4386
4387	/* Informative for users. */
4388	seq_printf(m, " %s",
4389		   mod->state == MODULE_STATE_GOING ? "Unloading" :
4390		   mod->state == MODULE_STATE_COMING ? "Loading" :
4391		   "Live");
4392	/* Used by oprofile and other similar tools. */
4393	value = m->private ? NULL : mod->core_layout.base;
4394	seq_printf(m, " 0x%px", value);
4395
4396	/* Taints info */
4397	if (mod->taints)
4398		seq_printf(m, " %s", module_flags(mod, buf));
4399
4400	seq_puts(m, "\n");
4401	return 0;
4402}
4403
4404/* Format: modulename size refcount deps address
4405
4406   Where refcount is a number or -, and deps is a comma-separated list
4407   of depends or -.
4408*/
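/*
 * Example /proc/modules line (illustrative):
 *	nf_nat 49152 2 xt_nat,xt_MASQUERADE, Live 0x0000000000000000
 * The address reads as zero when kallsyms_show_value() denies the
 * opener access (see modules_open() below).
 */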
4409static const struct seq_operations modules_op = {
4410	.start	= m_start,
4411	.next	= m_next,
4412	.stop	= m_stop,
4413	.show	= m_show
4414};
4415
4416/*
4417 * This also sets the "private" pointer to non-NULL if the
4418 * kernel pointers should be hidden (so you can just test
4419 * "m->private" to see if you should keep the values private).
4420 *
4421 * We use the same logic as for /proc/kallsyms.
4422 */
4423static int modules_open(struct inode *inode, struct file *file)
4424{
4425	int err = seq_open(file, &modules_op);
4426
4427	if (!err) {
4428		struct seq_file *m = file->private_data;
4429		m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
4430	}
4431
4432	return err;
4433}
4434
4435static const struct proc_ops modules_proc_ops = {
4436	.proc_flags	= PROC_ENTRY_PERMANENT,
4437	.proc_open	= modules_open,
4438	.proc_read	= seq_read,
4439	.proc_lseek	= seq_lseek,
4440	.proc_release	= seq_release,
4441};
4442
4443static int __init proc_modules_init(void)
4444{
4445	proc_create("modules", 0, NULL, &modules_proc_ops);
4446	return 0;
4447}
4448module_init(proc_modules_init);
4449#endif
4450
4451/* Given an address, look for it in the module exception tables. */
4452const struct exception_table_entry *search_module_extables(unsigned long addr)
4453{
4454	const struct exception_table_entry *e = NULL;
4455	struct module *mod;
4456
4457	preempt_disable();
4458	mod = __module_address(addr);
4459	if (!mod)
4460		goto out;
4461
4462	if (!mod->num_exentries)
4463		goto out;
4464
4465	e = search_extable(mod->extable,
4466			   mod->num_exentries,
4467			   addr);
4468out:
4469	preempt_enable();
4470
4471	/*
4472	 * If we found one, we are currently running inside it, so the
4473	 * module cannot be unloaded out from under us; no refcnt needed.
4474	 */
4475	return e;
4476}
4477
4478/*
4479 * is_module_address - is this address inside a module?
4480 * @addr: the address to check.
4481 *
4482 * See is_module_text_address() if you simply want to see if the address
4483 * is code (not data).
4484 */
4485bool is_module_address(unsigned long addr)
4486{
4487	bool ret;
4488
4489	preempt_disable();
4490	ret = __module_address(addr) != NULL;
4491	preempt_enable();
4492
4493	return ret;
4494}
4495
4496/*
4497 * __module_address - get the module which contains an address.
4498 * @addr: the address.
4499 *
4500 * Must be called with preempt disabled or module mutex held so that
4501 * module doesn't get freed during this.
4502 */
4503struct module *__module_address(unsigned long addr)
4504{
4505	struct module *mod;
4506
4507	if (addr < module_addr_min || addr > module_addr_max)
4508		return NULL;
4509
4510	module_assert_mutex_or_preempt();
4511
4512	mod = mod_find(addr);
4513	if (mod) {
4514		BUG_ON(!within_module(addr, mod));
4515		if (mod->state == MODULE_STATE_UNFORMED)
4516			mod = NULL;
4517	}
4518	return mod;
4519}
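/*
 * Note (illustrative): with CONFIG_MODULES_TREE_LOOKUP=y, mod_find()
 * walks a latched rb-tree kept current by mod_tree_insert() and
 * mod_tree_remove(), so this lookup stays cheap and safe from
 * RCU/preempt-disabled context; otherwise it falls back to a list scan.
 */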
4520
4521/*
4522 * is_module_text_address - is this address inside module code?
4523 * @addr: the address to check.
4524 *
4525 * See is_module_address() if you simply want to see if the address is
4526 * anywhere in a module.  See kernel_text_address() for testing if an
4527 * address corresponds to kernel or module code.
4528 */
4529bool is_module_text_address(unsigned long addr)
4530{
4531	bool ret;
4532
4533	preempt_disable();
4534	ret = __module_text_address(addr) != NULL;
4535	preempt_enable();
4536
4537	return ret;
4538}
4539
4540/*
4541 * __module_text_address - get the module whose code contains an address.
4542 * @addr: the address.
4543 *
4544 * Must be called with preempt disabled or module mutex held so that
4545 * module doesn't get freed during this.
4546 */
4547struct module *__module_text_address(unsigned long addr)
4548{
4549	struct module *mod = __module_address(addr);
4550	if (mod) {
4551		/* Make sure it's within the text section. */
4552		if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
4553		    && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
4554			mod = NULL;
4555	}
4556	return mod;
4557}
4558
4559/* Don't grab lock, we're oopsing. */
4560void print_modules(void)
4561{
4562	struct module *mod;
4563	char buf[MODULE_FLAGS_BUF_SIZE];
4564
4565	printk(KERN_DEFAULT "Modules linked in:");
4566	/* Most callers should already have preempt disabled, but make sure */
4567	preempt_disable();
4568	list_for_each_entry_rcu(mod, &modules, list) {
4569		if (mod->state == MODULE_STATE_UNFORMED)
4570			continue;
4571		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
4572	}
4573	preempt_enable();
4574	if (last_unloaded_module[0])
4575		pr_cont(" [last unloaded: %s]", last_unloaded_module);
4576	pr_cont("\n");
4577}
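/*
 * Example oops footer (illustrative):
 *	Modules linked in: mymod(OE+) nf_nat xt_addrtype [last unloaded: dummy]
 */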
4578
4579#ifdef CONFIG_MODVERSIONS
4580/* Generate the signature for all relevant module structures here.
4581 * If these change, we don't want to try to parse the module. */
4582void module_layout(struct module *mod,
4583		   struct modversion_info *ver,
4584		   struct kernel_param *kp,
4585		   struct kernel_symbol *ks,
4586		   struct tracepoint * const *tp)
4587{
4588}
4589EXPORT_SYMBOL(module_layout);
4590#endif