   1/*
   2   Copyright (C) 2002 Richard Henderson
   3   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
   4
   5    This program is free software; you can redistribute it and/or modify
   6    it under the terms of the GNU General Public License as published by
   7    the Free Software Foundation; either version 2 of the License, or
   8    (at your option) any later version.
   9
  10    This program is distributed in the hope that it will be useful,
  11    but WITHOUT ANY WARRANTY; without even the implied warranty of
  12    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13    GNU General Public License for more details.
  14
  15    You should have received a copy of the GNU General Public License
  16    along with this program; if not, write to the Free Software
  17    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18*/
  19#include <linux/export.h>
  20#include <linux/extable.h>
  21#include <linux/moduleloader.h>
  22#include <linux/trace_events.h>
  23#include <linux/init.h>
  24#include <linux/kallsyms.h>
  25#include <linux/file.h>
  26#include <linux/fs.h>
  27#include <linux/sysfs.h>
  28#include <linux/kernel.h>
  29#include <linux/slab.h>
  30#include <linux/vmalloc.h>
  31#include <linux/elf.h>
  32#include <linux/proc_fs.h>
  33#include <linux/security.h>
  34#include <linux/seq_file.h>
  35#include <linux/syscalls.h>
  36#include <linux/fcntl.h>
  37#include <linux/rcupdate.h>
  38#include <linux/capability.h>
  39#include <linux/cpu.h>
  40#include <linux/moduleparam.h>
  41#include <linux/errno.h>
  42#include <linux/err.h>
  43#include <linux/vermagic.h>
  44#include <linux/notifier.h>
  45#include <linux/sched.h>
  46#include <linux/device.h>
  47#include <linux/string.h>
  48#include <linux/mutex.h>
  49#include <linux/rculist.h>
  50#include <linux/uaccess.h>
  51#include <asm/cacheflush.h>
  52#include <linux/set_memory.h>
  53#include <asm/mmu_context.h>
  54#include <linux/license.h>
  55#include <asm/sections.h>
  56#include <linux/tracepoint.h>
  57#include <linux/ftrace.h>
  58#include <linux/livepatch.h>
  59#include <linux/async.h>
  60#include <linux/percpu.h>
  61#include <linux/kmemleak.h>
  62#include <linux/jump_label.h>
  63#include <linux/pfn.h>
  64#include <linux/bsearch.h>
  65#include <linux/dynamic_debug.h>
  66#include <linux/audit.h>
  67#include <uapi/linux/module.h>
  68#include "module-internal.h"
  69
  70#define CREATE_TRACE_POINTS
  71#include <trace/events/module.h>
  72
  73#ifndef ARCH_SHF_SMALL
  74#define ARCH_SHF_SMALL 0
  75#endif
  76
  77/*
  78 * Modules' sections will be aligned on page boundaries
  79 * to ensure complete separation of code and data, but
  80 * only when CONFIG_STRICT_MODULE_RWX=y
  81 */
  82#ifdef CONFIG_STRICT_MODULE_RWX
  83# define debug_align(X) ALIGN(X, PAGE_SIZE)
  84#else
  85# define debug_align(X) (X)
  86#endif
  87
  88/* If this is set, the section belongs in the init part of the module */
  89#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
  90
  91/*
  92 * Mutex protects:
  93 * 1) List of modules (also safely readable with preempt_disable),
  94 * 2) module_use links,
  95 * 3) module_addr_min/module_addr_max.
   96 * (delete and add use RCU list operations). */
  97DEFINE_MUTEX(module_mutex);
  98EXPORT_SYMBOL_GPL(module_mutex);
  99static LIST_HEAD(modules);
 100
 101#ifdef CONFIG_MODULES_TREE_LOOKUP
 102
 103/*
 104 * Use a latched RB-tree for __module_address(); this allows us to use
 105 * RCU-sched lookups of the address from any context.
 106 *
 107 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 108 * __module_address() hard by doing a lot of stack unwinding; potentially from
 109 * NMI context.
 110 */
 111
 112static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
 113{
 114	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
 115
 116	return (unsigned long)layout->base;
 117}
 118
 119static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
 120{
 121	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
 122
 123	return (unsigned long)layout->size;
 124}
 125
 126static __always_inline bool
 127mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
 128{
 129	return __mod_tree_val(a) < __mod_tree_val(b);
 130}
 131
 132static __always_inline int
 133mod_tree_comp(void *key, struct latch_tree_node *n)
 134{
 135	unsigned long val = (unsigned long)key;
 136	unsigned long start, end;
 137
 138	start = __mod_tree_val(n);
 139	if (val < start)
 140		return -1;
 141
 142	end = start + __mod_tree_size(n);
 143	if (val >= end)
 144		return 1;
 145
 146	return 0;
 147}
 148
 149static const struct latch_tree_ops mod_tree_ops = {
 150	.less = mod_tree_less,
 151	.comp = mod_tree_comp,
 152};
 153
 154static struct mod_tree_root {
 155	struct latch_tree_root root;
 156	unsigned long addr_min;
 157	unsigned long addr_max;
 158} mod_tree __cacheline_aligned = {
 159	.addr_min = -1UL,
 160};
 161
 162#define module_addr_min mod_tree.addr_min
 163#define module_addr_max mod_tree.addr_max
 164
 165static noinline void __mod_tree_insert(struct mod_tree_node *node)
 166{
 167	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
 168}
 169
 170static void __mod_tree_remove(struct mod_tree_node *node)
 171{
 172	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
 173}
 174
 175/*
 176 * These modifications: insert, remove_init and remove; are serialized by the
 177 * module_mutex.
 178 */
 179static void mod_tree_insert(struct module *mod)
 180{
 181	mod->core_layout.mtn.mod = mod;
 182	mod->init_layout.mtn.mod = mod;
 183
 184	__mod_tree_insert(&mod->core_layout.mtn);
 185	if (mod->init_layout.size)
 186		__mod_tree_insert(&mod->init_layout.mtn);
 187}
 188
 189static void mod_tree_remove_init(struct module *mod)
 190{
 191	if (mod->init_layout.size)
 192		__mod_tree_remove(&mod->init_layout.mtn);
 193}
 194
 195static void mod_tree_remove(struct module *mod)
 196{
 197	__mod_tree_remove(&mod->core_layout.mtn);
 198	mod_tree_remove_init(mod);
 199}
 200
 201static struct module *mod_find(unsigned long addr)
 202{
 203	struct latch_tree_node *ltn;
 204
 205	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
 206	if (!ltn)
 207		return NULL;
 208
 209	return container_of(ltn, struct mod_tree_node, node)->mod;
 210}
 211
 212#else /* MODULES_TREE_LOOKUP */
 213
 214static unsigned long module_addr_min = -1UL, module_addr_max = 0;
 215
 216static void mod_tree_insert(struct module *mod) { }
 217static void mod_tree_remove_init(struct module *mod) { }
 218static void mod_tree_remove(struct module *mod) { }
 219
 220static struct module *mod_find(unsigned long addr)
 221{
 222	struct module *mod;
 223
 224	list_for_each_entry_rcu(mod, &modules, list) {
 225		if (within_module(addr, mod))
 226			return mod;
 227	}
 228
 229	return NULL;
 230}
 231
 232#endif /* MODULES_TREE_LOOKUP */
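     /*
      * Illustrative sketch (editor's addition, not in the original file):
      * whichever mod_find() variant is built, callers are expected to look
      * modules up with preemption disabled or module_mutex held, roughly:
      *
      *	preempt_disable();
      *	mod = mod_find(addr);
      *	if (mod && mod->state == MODULE_STATE_UNFORMED)
      *		mod = NULL;
      *	preempt_enable();
      */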
 233
 234/*
 235 * Bounds of module text, for speeding up __module_address.
 236 * Protected by module_mutex.
 237 */
 238static void __mod_update_bounds(void *base, unsigned int size)
 239{
 240	unsigned long min = (unsigned long)base;
 241	unsigned long max = min + size;
 242
 243	if (min < module_addr_min)
 244		module_addr_min = min;
 245	if (max > module_addr_max)
 246		module_addr_max = max;
 247}
 248
 249static void mod_update_bounds(struct module *mod)
 250{
 251	__mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
 252	if (mod->init_layout.size)
 253		__mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
 254}
 255
 256#ifdef CONFIG_KGDB_KDB
 257struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 258#endif /* CONFIG_KGDB_KDB */
 259
 260static void module_assert_mutex(void)
 261{
 262	lockdep_assert_held(&module_mutex);
 263}
 264
 265static void module_assert_mutex_or_preempt(void)
 266{
 267#ifdef CONFIG_LOCKDEP
 268	if (unlikely(!debug_locks))
 269		return;
 270
 271	WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
 272		!lockdep_is_held(&module_mutex));
 273#endif
 274}
 275
 276static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
 277#ifndef CONFIG_MODULE_SIG_FORCE
 278module_param(sig_enforce, bool_enable_only, 0644);
 279#endif /* !CONFIG_MODULE_SIG_FORCE */
 280
 281/*
  282 * Export the sig_enforce kernel cmdline parameter so that other subsystems
  283 * can rely on it instead of checking CONFIG_MODULE_SIG_FORCE directly.
 284 */
 285bool is_module_sig_enforced(void)
 286{
 287	return sig_enforce;
 288}
 289EXPORT_SYMBOL(is_module_sig_enforced);
 290
 291/* Block module loading/unloading? */
 292int modules_disabled = 0;
 293core_param(nomodule, modules_disabled, bint, 0);
 294
 295/* Waiting for a module to finish initializing? */
 296static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 297
 298static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 299
 300int register_module_notifier(struct notifier_block *nb)
 301{
 302	return blocking_notifier_chain_register(&module_notify_list, nb);
 303}
 304EXPORT_SYMBOL(register_module_notifier);
 305
 306int unregister_module_notifier(struct notifier_block *nb)
 307{
 308	return blocking_notifier_chain_unregister(&module_notify_list, nb);
 309}
 310EXPORT_SYMBOL(unregister_module_notifier);
 311
 312struct load_info {
 313	const char *name;
 314	Elf_Ehdr *hdr;
 315	unsigned long len;
 316	Elf_Shdr *sechdrs;
 317	char *secstrings, *strtab;
 318	unsigned long symoffs, stroffs;
 319	struct _ddebug *debug;
 320	unsigned int num_debug;
 321	bool sig_ok;
 322#ifdef CONFIG_KALLSYMS
 323	unsigned long mod_kallsyms_init_off;
 324#endif
 325	struct {
 326		unsigned int sym, str, mod, vers, info, pcpu;
 327	} index;
 328};
 329
 330/*
 331 * We require a truly strong try_module_get(): 0 means success.
 332 * Otherwise an error is returned due to ongoing or failed
 333 * initialization etc.
 334 */
 335static inline int strong_try_module_get(struct module *mod)
 336{
 337	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
 338	if (mod && mod->state == MODULE_STATE_COMING)
 339		return -EBUSY;
 340	if (try_module_get(mod))
 341		return 0;
 342	else
 343		return -ENOENT;
 344}
 345
 346static inline void add_taint_module(struct module *mod, unsigned flag,
 347				    enum lockdep_ok lockdep_ok)
 348{
 349	add_taint(flag, lockdep_ok);
 350	set_bit(flag, &mod->taints);
 351}
 352
 353/*
 354 * A thread that wants to hold a reference to a module only while it
 355 * is running can call this to safely exit.  nfsd and lockd use this.
 356 */
 357void __noreturn __module_put_and_exit(struct module *mod, long code)
 358{
 359	module_put(mod);
 360	do_exit(code);
 361}
 362EXPORT_SYMBOL(__module_put_and_exit);
 363
 364/* Find a module section: 0 means not found. */
 365static unsigned int find_sec(const struct load_info *info, const char *name)
 366{
 367	unsigned int i;
 368
 369	for (i = 1; i < info->hdr->e_shnum; i++) {
 370		Elf_Shdr *shdr = &info->sechdrs[i];
 371		/* Alloc bit cleared means "ignore it." */
 372		if ((shdr->sh_flags & SHF_ALLOC)
 373		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
 374			return i;
 375	}
 376	return 0;
 377}
 378
 379/* Find a module section, or NULL. */
 380static void *section_addr(const struct load_info *info, const char *name)
 381{
 382	/* Section 0 has sh_addr 0. */
 383	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
 384}
 385
 386/* Find a module section, or NULL.  Fill in number of "objects" in section. */
 387static void *section_objs(const struct load_info *info,
 388			  const char *name,
 389			  size_t object_size,
 390			  unsigned int *num)
 391{
 392	unsigned int sec = find_sec(info, name);
 393
 394	/* Section 0 has sh_addr 0 and sh_size 0. */
 395	*num = info->sechdrs[sec].sh_size / object_size;
 396	return (void *)info->sechdrs[sec].sh_addr;
 397}
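     /*
      * Editor's sketch of typical use (mirroring how sections are parsed
      * later in this file; "__param" is one real example section name):
      *
      *	mod->kp = section_objs(info, "__param",
      *			       sizeof(*mod->kp), &mod->num_kp);
      *
      * A missing section is harmless: find_sec() returns 0, and section 0
      * has sh_addr == 0 and sh_size == 0, so the caller simply gets a NULL
      * pointer and a count of zero.
      */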
 398
 399/* Provided by the linker */
 400extern const struct kernel_symbol __start___ksymtab[];
 401extern const struct kernel_symbol __stop___ksymtab[];
 402extern const struct kernel_symbol __start___ksymtab_gpl[];
 403extern const struct kernel_symbol __stop___ksymtab_gpl[];
 404extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 405extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
 406extern const s32 __start___kcrctab[];
 407extern const s32 __start___kcrctab_gpl[];
 408extern const s32 __start___kcrctab_gpl_future[];
 409#ifdef CONFIG_UNUSED_SYMBOLS
 410extern const struct kernel_symbol __start___ksymtab_unused[];
 411extern const struct kernel_symbol __stop___ksymtab_unused[];
 412extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
 413extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
 414extern const s32 __start___kcrctab_unused[];
 415extern const s32 __start___kcrctab_unused_gpl[];
 416#endif
 417
 418#ifndef CONFIG_MODVERSIONS
 419#define symversion(base, idx) NULL
 420#else
 421#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
 422#endif
 423
 424static bool each_symbol_in_section(const struct symsearch *arr,
 425				   unsigned int arrsize,
 426				   struct module *owner,
 427				   bool (*fn)(const struct symsearch *syms,
 428					      struct module *owner,
 429					      void *data),
 430				   void *data)
 431{
 432	unsigned int j;
 433
 434	for (j = 0; j < arrsize; j++) {
 435		if (fn(&arr[j], owner, data))
 436			return true;
 437	}
 438
 439	return false;
 440}
 441
 442/* Returns true as soon as fn returns true, otherwise false. */
 443bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 444				    struct module *owner,
 445				    void *data),
 446			 void *data)
 447{
 448	struct module *mod;
 449	static const struct symsearch arr[] = {
 450		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
 451		  NOT_GPL_ONLY, false },
 452		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
 453		  __start___kcrctab_gpl,
 454		  GPL_ONLY, false },
 455		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
 456		  __start___kcrctab_gpl_future,
 457		  WILL_BE_GPL_ONLY, false },
 458#ifdef CONFIG_UNUSED_SYMBOLS
 459		{ __start___ksymtab_unused, __stop___ksymtab_unused,
 460		  __start___kcrctab_unused,
 461		  NOT_GPL_ONLY, true },
 462		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
 463		  __start___kcrctab_unused_gpl,
 464		  GPL_ONLY, true },
 465#endif
 466	};
 467
 468	module_assert_mutex_or_preempt();
 469
 470	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
 471		return true;
 472
 473	list_for_each_entry_rcu(mod, &modules, list) {
 474		struct symsearch arr[] = {
 475			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
 476			  NOT_GPL_ONLY, false },
 477			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
 478			  mod->gpl_crcs,
 479			  GPL_ONLY, false },
 480			{ mod->gpl_future_syms,
 481			  mod->gpl_future_syms + mod->num_gpl_future_syms,
 482			  mod->gpl_future_crcs,
 483			  WILL_BE_GPL_ONLY, false },
 484#ifdef CONFIG_UNUSED_SYMBOLS
 485			{ mod->unused_syms,
 486			  mod->unused_syms + mod->num_unused_syms,
 487			  mod->unused_crcs,
 488			  NOT_GPL_ONLY, true },
 489			{ mod->unused_gpl_syms,
 490			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
 491			  mod->unused_gpl_crcs,
 492			  GPL_ONLY, true },
 493#endif
 494		};
 495
 496		if (mod->state == MODULE_STATE_UNFORMED)
 497			continue;
 498
 499		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
 500			return true;
 501	}
 502	return false;
 503}
 504EXPORT_SYMBOL_GPL(each_symbol_section);
 505
 506struct find_symbol_arg {
 507	/* Input */
 508	const char *name;
 509	bool gplok;
 510	bool warn;
 511
 512	/* Output */
 513	struct module *owner;
 514	const s32 *crc;
 515	const struct kernel_symbol *sym;
 516};
 517
 518static bool check_symbol(const struct symsearch *syms,
 519				 struct module *owner,
 520				 unsigned int symnum, void *data)
 521{
 522	struct find_symbol_arg *fsa = data;
 523
 524	if (!fsa->gplok) {
 525		if (syms->licence == GPL_ONLY)
 526			return false;
 527		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
 528			pr_warn("Symbol %s is being used by a non-GPL module, "
 529				"which will not be allowed in the future\n",
 530				fsa->name);
 531		}
 532	}
 533
 534#ifdef CONFIG_UNUSED_SYMBOLS
 535	if (syms->unused && fsa->warn) {
 536		pr_warn("Symbol %s is marked as UNUSED, however this module is "
 537			"using it.\n", fsa->name);
 538		pr_warn("This symbol will go away in the future.\n");
 539		pr_warn("Please evaluate if this is the right api to use and "
 540			"if it really is, submit a report to the linux kernel "
 541			"mailing list together with submitting your code for "
 542			"inclusion.\n");
 543	}
 544#endif
 545
 546	fsa->owner = owner;
 547	fsa->crc = symversion(syms->crcs, symnum);
 548	fsa->sym = &syms->start[symnum];
 549	return true;
 550}
 551
 552static int cmp_name(const void *va, const void *vb)
 553{
 554	const char *a;
 555	const struct kernel_symbol *b;
 556	a = va; b = vb;
 557	return strcmp(a, b->name);
 558}
 559
 560static bool find_symbol_in_section(const struct symsearch *syms,
 561				   struct module *owner,
 562				   void *data)
 563{
 564	struct find_symbol_arg *fsa = data;
 565	struct kernel_symbol *sym;
 566
 567	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
 568			sizeof(struct kernel_symbol), cmp_name);
 569
 570	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
 571		return true;
 572
 573	return false;
 574}
 575
  576/* Find a symbol and return it, along with (optional) crc and
  577 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
 578const struct kernel_symbol *find_symbol(const char *name,
 579					struct module **owner,
 580					const s32 **crc,
 581					bool gplok,
 582					bool warn)
 583{
 584	struct find_symbol_arg fsa;
 585
 586	fsa.name = name;
 587	fsa.gplok = gplok;
 588	fsa.warn = warn;
 589
 590	if (each_symbol_section(find_symbol_in_section, &fsa)) {
 591		if (owner)
 592			*owner = fsa.owner;
 593		if (crc)
 594			*crc = fsa.crc;
 595		return fsa.sym;
 596	}
 597
 598	pr_debug("Failed to find symbol %s\n", name);
 599	return NULL;
 600}
 601EXPORT_SYMBOL_GPL(find_symbol);
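     /*
      * Editor's note: resolve_symbol() below calls this with gplok derived
      * from the requesting module's taint, e.g.
      *
      *	sym = find_symbol(name, &owner, &crc,
      *			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
      *
      * whereas __symbol_put() only needs the owner and passes NULL for crc.
      */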
 602
 603/*
 604 * Search for module by name: must hold module_mutex (or preempt disabled
 605 * for read-only access).
 606 */
 607static struct module *find_module_all(const char *name, size_t len,
 608				      bool even_unformed)
 609{
 610	struct module *mod;
 611
 612	module_assert_mutex_or_preempt();
 613
 614	list_for_each_entry_rcu(mod, &modules, list) {
 615		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
 616			continue;
 617		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
 618			return mod;
 619	}
 620	return NULL;
 621}
 622
 623struct module *find_module(const char *name)
 624{
 625	module_assert_mutex();
 626	return find_module_all(name, strlen(name), false);
 627}
 628EXPORT_SYMBOL_GPL(find_module);
 629
 630#ifdef CONFIG_SMP
 631
 632static inline void __percpu *mod_percpu(struct module *mod)
 633{
 634	return mod->percpu;
 635}
 636
 637static int percpu_modalloc(struct module *mod, struct load_info *info)
 638{
 639	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
 640	unsigned long align = pcpusec->sh_addralign;
 641
 642	if (!pcpusec->sh_size)
 643		return 0;
 644
 645	if (align > PAGE_SIZE) {
 646		pr_warn("%s: per-cpu alignment %li > %li\n",
 647			mod->name, align, PAGE_SIZE);
 648		align = PAGE_SIZE;
 649	}
 650
 651	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
 652	if (!mod->percpu) {
 653		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
 654			mod->name, (unsigned long)pcpusec->sh_size);
 655		return -ENOMEM;
 656	}
 657	mod->percpu_size = pcpusec->sh_size;
 658	return 0;
 659}
 660
 661static void percpu_modfree(struct module *mod)
 662{
 663	free_percpu(mod->percpu);
 664}
 665
 666static unsigned int find_pcpusec(struct load_info *info)
 667{
 668	return find_sec(info, ".data..percpu");
 669}
 670
 671static void percpu_modcopy(struct module *mod,
 672			   const void *from, unsigned long size)
 673{
 674	int cpu;
 675
 676	for_each_possible_cpu(cpu)
 677		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
 678}
 679
 680bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 681{
 682	struct module *mod;
 683	unsigned int cpu;
 684
 685	preempt_disable();
 686
 687	list_for_each_entry_rcu(mod, &modules, list) {
 688		if (mod->state == MODULE_STATE_UNFORMED)
 689			continue;
 690		if (!mod->percpu_size)
 691			continue;
 692		for_each_possible_cpu(cpu) {
 693			void *start = per_cpu_ptr(mod->percpu, cpu);
 694			void *va = (void *)addr;
 695
 696			if (va >= start && va < start + mod->percpu_size) {
 697				if (can_addr) {
 698					*can_addr = (unsigned long) (va - start);
 699					*can_addr += (unsigned long)
 700						per_cpu_ptr(mod->percpu,
 701							    get_boot_cpu_id());
 702				}
 703				preempt_enable();
 704				return true;
 705			}
 706		}
 707	}
 708
 709	preempt_enable();
 710	return false;
 711}
 712
 713/**
 714 * is_module_percpu_address - test whether address is from module static percpu
 715 * @addr: address to test
 716 *
 717 * Test whether @addr belongs to module static percpu area.
 718 *
 719 * RETURNS:
 720 * %true if @addr is from module static percpu area
 721 */
 722bool is_module_percpu_address(unsigned long addr)
 723{
 724	return __is_module_percpu_address(addr, NULL);
 725}
 726
 727#else /* ... !CONFIG_SMP */
 728
 729static inline void __percpu *mod_percpu(struct module *mod)
 730{
 731	return NULL;
 732}
 733static int percpu_modalloc(struct module *mod, struct load_info *info)
 734{
 735	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
 736	if (info->sechdrs[info->index.pcpu].sh_size != 0)
 737		return -ENOMEM;
 738	return 0;
 739}
 740static inline void percpu_modfree(struct module *mod)
 741{
 742}
 743static unsigned int find_pcpusec(struct load_info *info)
 744{
 745	return 0;
 746}
 747static inline void percpu_modcopy(struct module *mod,
 748				  const void *from, unsigned long size)
 749{
 750	/* pcpusec should be 0, and size of that section should be 0. */
 751	BUG_ON(size != 0);
 752}
 753bool is_module_percpu_address(unsigned long addr)
 754{
 755	return false;
 756}
 757
 758bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 759{
 760	return false;
 761}
 762
 763#endif /* CONFIG_SMP */
 764
 765#define MODINFO_ATTR(field)	\
 766static void setup_modinfo_##field(struct module *mod, const char *s)  \
 767{                                                                     \
 768	mod->field = kstrdup(s, GFP_KERNEL);                          \
 769}                                                                     \
 770static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
 771			struct module_kobject *mk, char *buffer)      \
 772{                                                                     \
 773	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
 774}                                                                     \
 775static int modinfo_##field##_exists(struct module *mod)               \
 776{                                                                     \
 777	return mod->field != NULL;                                    \
 778}                                                                     \
 779static void free_modinfo_##field(struct module *mod)                  \
 780{                                                                     \
 781	kfree(mod->field);                                            \
 782	mod->field = NULL;                                            \
 783}                                                                     \
 784static struct module_attribute modinfo_##field = {                    \
 785	.attr = { .name = __stringify(field), .mode = 0444 },         \
 786	.show = show_modinfo_##field,                                 \
 787	.setup = setup_modinfo_##field,                               \
 788	.test = modinfo_##field##_exists,                             \
 789	.free = free_modinfo_##field,                                 \
 790};
 791
 792MODINFO_ATTR(version);
 793MODINFO_ATTR(srcversion);
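     /*
      * Editor's note: each MODINFO_ATTR(field) line above expands, per the
      * macro, into setup_modinfo_<field>(), show_modinfo_<field>(),
      * modinfo_<field>_exists(), free_modinfo_<field>() and a 0444
      * struct module_attribute modinfo_<field>, which is later exposed as
      * /sys/module/<name>/version and /sys/module/<name>/srcversion.
      */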
 794
 795static char last_unloaded_module[MODULE_NAME_LEN+1];
 796
 797#ifdef CONFIG_MODULE_UNLOAD
 798
 799EXPORT_TRACEPOINT_SYMBOL(module_get);
 800
  801/* MODULE_REF_BASE is the base reference count held by the kernel module loader. */
 802#define MODULE_REF_BASE	1
 803
 804/* Init the unload section of the module. */
 805static int module_unload_init(struct module *mod)
 806{
 807	/*
 808	 * Initialize reference counter to MODULE_REF_BASE.
 809	 * refcnt == 0 means module is going.
 810	 */
 811	atomic_set(&mod->refcnt, MODULE_REF_BASE);
 812
 813	INIT_LIST_HEAD(&mod->source_list);
 814	INIT_LIST_HEAD(&mod->target_list);
 815
 816	/* Hold reference count during initialization. */
 817	atomic_inc(&mod->refcnt);
 818
 819	return 0;
 820}
 821
  822/* Does module a already use module b? */
 823static int already_uses(struct module *a, struct module *b)
 824{
 825	struct module_use *use;
 826
 827	list_for_each_entry(use, &b->source_list, source_list) {
 828		if (use->source == a) {
 829			pr_debug("%s uses %s!\n", a->name, b->name);
 830			return 1;
 831		}
 832	}
 833	pr_debug("%s does not use %s!\n", a->name, b->name);
 834	return 0;
 835}
 836
 837/*
 838 * Module a uses b
 839 *  - we add 'a' as a "source", 'b' as a "target" of module use
 840 *  - the module_use is added to the list of 'b' sources (so
 841 *    'b' can walk the list to see who sourced them), and of 'a'
 842 *    targets (so 'a' can see what modules it targets).
 843 */
 844static int add_module_usage(struct module *a, struct module *b)
 845{
 846	struct module_use *use;
 847
 848	pr_debug("Allocating new usage for %s.\n", a->name);
 849	use = kmalloc(sizeof(*use), GFP_ATOMIC);
 850	if (!use)
 851		return -ENOMEM;
 852
 853	use->source = a;
 854	use->target = b;
 855	list_add(&use->source_list, &b->source_list);
 856	list_add(&use->target_list, &a->target_list);
 857	return 0;
 858}
 859
 860/* Module a uses b: caller needs module_mutex() */
 861int ref_module(struct module *a, struct module *b)
 862{
 863	int err;
 864
 865	if (b == NULL || already_uses(a, b))
 866		return 0;
 867
 868	/* If module isn't available, we fail. */
 869	err = strong_try_module_get(b);
 870	if (err)
 871		return err;
 872
 873	err = add_module_usage(a, b);
 874	if (err) {
 875		module_put(b);
 876		return err;
 877	}
 878	return 0;
 879}
 880EXPORT_SYMBOL_GPL(ref_module);
 881
 882/* Clear the unload stuff of the module. */
 883static void module_unload_free(struct module *mod)
 884{
 885	struct module_use *use, *tmp;
 886
 887	mutex_lock(&module_mutex);
 888	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
 889		struct module *i = use->target;
 890		pr_debug("%s unusing %s\n", mod->name, i->name);
 891		module_put(i);
 892		list_del(&use->source_list);
 893		list_del(&use->target_list);
 894		kfree(use);
 895	}
 896	mutex_unlock(&module_mutex);
 897}
 898
 899#ifdef CONFIG_MODULE_FORCE_UNLOAD
 900static inline int try_force_unload(unsigned int flags)
 901{
 902	int ret = (flags & O_TRUNC);
 903	if (ret)
 904		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
 905	return ret;
 906}
 907#else
 908static inline int try_force_unload(unsigned int flags)
 909{
 910	return 0;
 911}
 912#endif /* CONFIG_MODULE_FORCE_UNLOAD */
 913
 914/* Try to release refcount of module, 0 means success. */
 915static int try_release_module_ref(struct module *mod)
 916{
 917	int ret;
 918
 919	/* Try to decrement refcnt which we set at loading */
 920	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
 921	BUG_ON(ret < 0);
 922	if (ret)
 923		/* Someone can put this right now, recover with checking */
 924		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
 925
 926	return ret;
 927}
 928
 929static int try_stop_module(struct module *mod, int flags, int *forced)
 930{
 931	/* If it's not unused, quit unless we're forcing. */
 932	if (try_release_module_ref(mod) != 0) {
 933		*forced = try_force_unload(flags);
 934		if (!(*forced))
 935			return -EWOULDBLOCK;
 936	}
 937
 938	/* Mark it as dying. */
 939	mod->state = MODULE_STATE_GOING;
 940
 941	return 0;
 942}
 943
 944/**
 945 * module_refcount - return the refcount or -1 if unloading
 946 *
 947 * @mod:	the module we're checking
 948 *
 949 * Returns:
 950 *	-1 if the module is in the process of unloading
 951 *	otherwise the number of references in the kernel to the module
 952 */
 953int module_refcount(struct module *mod)
 954{
 955	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
 956}
 957EXPORT_SYMBOL(module_refcount);
 958
 959/* This exists whether we can unload or not */
 960static void free_module(struct module *mod);
 961
 962SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 963		unsigned int, flags)
 964{
 965	struct module *mod;
 966	char name[MODULE_NAME_LEN];
 967	int ret, forced = 0;
 968
 969	if (!capable(CAP_SYS_MODULE) || modules_disabled)
 970		return -EPERM;
 971
 972	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
 973		return -EFAULT;
 974	name[MODULE_NAME_LEN-1] = '\0';
 975
 976	audit_log_kern_module(name);
 977
 978	if (mutex_lock_interruptible(&module_mutex) != 0)
 979		return -EINTR;
 980
 981	mod = find_module(name);
 982	if (!mod) {
 983		ret = -ENOENT;
 984		goto out;
 985	}
 986
 987	if (!list_empty(&mod->source_list)) {
 988		/* Other modules depend on us: get rid of them first. */
 989		ret = -EWOULDBLOCK;
 990		goto out;
 991	}
 992
 993	/* Doing init or already dying? */
 994	if (mod->state != MODULE_STATE_LIVE) {
 995		/* FIXME: if (force), slam module count damn the torpedoes */
 996		pr_debug("%s already dying\n", mod->name);
 997		ret = -EBUSY;
 998		goto out;
 999	}
1000
1001	/* If it has an init func, it must have an exit func to unload */
1002	if (mod->init && !mod->exit) {
1003		forced = try_force_unload(flags);
1004		if (!forced) {
1005			/* This module can't be removed */
1006			ret = -EBUSY;
1007			goto out;
1008		}
1009	}
1010
1011	/* Stop the machine so refcounts can't move and disable module. */
1012	ret = try_stop_module(mod, flags, &forced);
1013	if (ret != 0)
1014		goto out;
1015
1016	mutex_unlock(&module_mutex);
1017	/* Final destruction now no one is using it. */
1018	if (mod->exit != NULL)
1019		mod->exit();
1020	blocking_notifier_call_chain(&module_notify_list,
1021				     MODULE_STATE_GOING, mod);
1022	klp_module_going(mod);
1023	ftrace_release_mod(mod);
1024
1025	async_synchronize_full();
1026
1027	/* Store the name of the last unloaded module for diagnostic purposes */
1028	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
1029
1030	free_module(mod);
1031	return 0;
1032out:
1033	mutex_unlock(&module_mutex);
1034	return ret;
1035}
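     /*
      * Editor's note: this is the delete_module(2) syscall. As a rule of
      * thumb (an assumption about the usual caller, not enforced here),
      * rmmod passes O_NONBLOCK in flags and "rmmod -f" adds O_TRUNC, which
      * try_force_unload() above treats as a forced unload and taints the
      * kernel with TAINT_FORCED_RMMOD.
      */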
1036
1037static inline void print_unload_info(struct seq_file *m, struct module *mod)
1038{
1039	struct module_use *use;
1040	int printed_something = 0;
1041
1042	seq_printf(m, " %i ", module_refcount(mod));
1043
1044	/*
1045	 * Always include a trailing , so userspace can differentiate
1046	 * between this and the old multi-field proc format.
1047	 */
1048	list_for_each_entry(use, &mod->source_list, source_list) {
1049		printed_something = 1;
1050		seq_printf(m, "%s,", use->source->name);
1051	}
1052
1053	if (mod->init != NULL && mod->exit == NULL) {
1054		printed_something = 1;
1055		seq_puts(m, "[permanent],");
1056	}
1057
1058	if (!printed_something)
1059		seq_puts(m, "-");
1060}
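     /*
      * Editor's example of the resulting /proc/modules tail: a module with
      * users prints its refcount and their names, e.g. " 2 dep_a,dep_b,";
      * a permanent module (init but no exit) prints "[permanent],"; a
      * module nothing depends on prints "-".
      */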
1061
1062void __symbol_put(const char *symbol)
1063{
1064	struct module *owner;
1065
1066	preempt_disable();
1067	if (!find_symbol(symbol, &owner, NULL, true, false))
1068		BUG();
1069	module_put(owner);
1070	preempt_enable();
1071}
1072EXPORT_SYMBOL(__symbol_put);
1073
1074/* Note this assumes addr is a function, which it currently always is. */
1075void symbol_put_addr(void *addr)
1076{
1077	struct module *modaddr;
1078	unsigned long a = (unsigned long)dereference_function_descriptor(addr);
1079
1080	if (core_kernel_text(a))
1081		return;
1082
1083	/*
1084	 * Even though we hold a reference on the module; we still need to
1085	 * disable preemption in order to safely traverse the data structure.
1086	 */
1087	preempt_disable();
1088	modaddr = __module_text_address(a);
1089	BUG_ON(!modaddr);
1090	module_put(modaddr);
1091	preempt_enable();
1092}
1093EXPORT_SYMBOL_GPL(symbol_put_addr);
1094
1095static ssize_t show_refcnt(struct module_attribute *mattr,
1096			   struct module_kobject *mk, char *buffer)
1097{
1098	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
1099}
1100
1101static struct module_attribute modinfo_refcnt =
1102	__ATTR(refcnt, 0444, show_refcnt, NULL);
1103
1104void __module_get(struct module *module)
1105{
1106	if (module) {
1107		preempt_disable();
1108		atomic_inc(&module->refcnt);
1109		trace_module_get(module, _RET_IP_);
1110		preempt_enable();
1111	}
1112}
1113EXPORT_SYMBOL(__module_get);
1114
1115bool try_module_get(struct module *module)
1116{
1117	bool ret = true;
1118
1119	if (module) {
1120		preempt_disable();
1121		/* Note: here, we can fail to get a reference */
1122		if (likely(module_is_live(module) &&
1123			   atomic_inc_not_zero(&module->refcnt) != 0))
1124			trace_module_get(module, _RET_IP_);
1125		else
1126			ret = false;
1127
1128		preempt_enable();
1129	}
1130	return ret;
1131}
1132EXPORT_SYMBOL(try_module_get);
1133
1134void module_put(struct module *module)
1135{
1136	int ret;
1137
1138	if (module) {
1139		preempt_disable();
1140		ret = atomic_dec_if_positive(&module->refcnt);
1141		WARN_ON(ret < 0);	/* Failed to put refcount */
1142		trace_module_put(module, _RET_IP_);
1143		preempt_enable();
1144	}
1145}
1146EXPORT_SYMBOL(module_put);
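     /*
      * Editor's sketch of the usual pairing (owner obtained elsewhere, e.g.
      * from find_symbol() or __module_text_address()):
      *
      *	if (!try_module_get(owner))
      *		return -ENODEV;		// owner is going away
      *	// ... use code or data belonging to owner ...
      *	module_put(owner);
      */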
1147
1148#else /* !CONFIG_MODULE_UNLOAD */
1149static inline void print_unload_info(struct seq_file *m, struct module *mod)
1150{
1151	/* We don't know the usage count, or what modules are using. */
1152	seq_puts(m, " - -");
1153}
1154
1155static inline void module_unload_free(struct module *mod)
1156{
1157}
1158
1159int ref_module(struct module *a, struct module *b)
1160{
1161	return strong_try_module_get(b);
1162}
1163EXPORT_SYMBOL_GPL(ref_module);
1164
1165static inline int module_unload_init(struct module *mod)
1166{
1167	return 0;
1168}
1169#endif /* CONFIG_MODULE_UNLOAD */
1170
1171static size_t module_flags_taint(struct module *mod, char *buf)
1172{
1173	size_t l = 0;
1174	int i;
1175
1176	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
1177		if (taint_flags[i].module && test_bit(i, &mod->taints))
1178			buf[l++] = taint_flags[i].c_true;
1179	}
1180
1181	return l;
1182}
1183
1184static ssize_t show_initstate(struct module_attribute *mattr,
1185			      struct module_kobject *mk, char *buffer)
1186{
1187	const char *state = "unknown";
1188
1189	switch (mk->mod->state) {
1190	case MODULE_STATE_LIVE:
1191		state = "live";
1192		break;
1193	case MODULE_STATE_COMING:
1194		state = "coming";
1195		break;
1196	case MODULE_STATE_GOING:
1197		state = "going";
1198		break;
1199	default:
1200		BUG();
1201	}
1202	return sprintf(buffer, "%s\n", state);
1203}
1204
1205static struct module_attribute modinfo_initstate =
1206	__ATTR(initstate, 0444, show_initstate, NULL);
1207
1208static ssize_t store_uevent(struct module_attribute *mattr,
1209			    struct module_kobject *mk,
1210			    const char *buffer, size_t count)
1211{
1212	kobject_synth_uevent(&mk->kobj, buffer, count);
1213	return count;
1214}
1215
1216struct module_attribute module_uevent =
1217	__ATTR(uevent, 0200, NULL, store_uevent);
1218
1219static ssize_t show_coresize(struct module_attribute *mattr,
1220			     struct module_kobject *mk, char *buffer)
1221{
1222	return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
1223}
1224
1225static struct module_attribute modinfo_coresize =
1226	__ATTR(coresize, 0444, show_coresize, NULL);
1227
1228static ssize_t show_initsize(struct module_attribute *mattr,
1229			     struct module_kobject *mk, char *buffer)
1230{
1231	return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
1232}
1233
1234static struct module_attribute modinfo_initsize =
1235	__ATTR(initsize, 0444, show_initsize, NULL);
1236
1237static ssize_t show_taint(struct module_attribute *mattr,
1238			  struct module_kobject *mk, char *buffer)
1239{
1240	size_t l;
1241
1242	l = module_flags_taint(mk->mod, buffer);
1243	buffer[l++] = '\n';
1244	return l;
1245}
1246
1247static struct module_attribute modinfo_taint =
1248	__ATTR(taint, 0444, show_taint, NULL);
1249
1250static struct module_attribute *modinfo_attrs[] = {
1251	&module_uevent,
1252	&modinfo_version,
1253	&modinfo_srcversion,
1254	&modinfo_initstate,
1255	&modinfo_coresize,
1256	&modinfo_initsize,
1257	&modinfo_taint,
1258#ifdef CONFIG_MODULE_UNLOAD
1259	&modinfo_refcnt,
1260#endif
1261	NULL,
1262};
1263
1264static const char vermagic[] = VERMAGIC_STRING;
1265
1266static int try_to_force_load(struct module *mod, const char *reason)
1267{
1268#ifdef CONFIG_MODULE_FORCE_LOAD
1269	if (!test_taint(TAINT_FORCED_MODULE))
1270		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
1271	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
1272	return 0;
1273#else
1274	return -ENOEXEC;
1275#endif
1276}
1277
1278#ifdef CONFIG_MODVERSIONS
1279
1280static u32 resolve_rel_crc(const s32 *crc)
1281{
1282	return *(u32 *)((void *)crc + *crc);
1283}
1284
1285static int check_version(const struct load_info *info,
1286			 const char *symname,
1287			 struct module *mod,
1288			 const s32 *crc)
1289{
1290	Elf_Shdr *sechdrs = info->sechdrs;
1291	unsigned int versindex = info->index.vers;
1292	unsigned int i, num_versions;
1293	struct modversion_info *versions;
1294
1295	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
1296	if (!crc)
1297		return 1;
1298
1299	/* No versions at all?  modprobe --force does this. */
1300	if (versindex == 0)
1301		return try_to_force_load(mod, symname) == 0;
1302
1303	versions = (void *) sechdrs[versindex].sh_addr;
1304	num_versions = sechdrs[versindex].sh_size
1305		/ sizeof(struct modversion_info);
1306
1307	for (i = 0; i < num_versions; i++) {
1308		u32 crcval;
1309
1310		if (strcmp(versions[i].name, symname) != 0)
1311			continue;
1312
1313		if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
1314			crcval = resolve_rel_crc(crc);
1315		else
1316			crcval = *crc;
1317		if (versions[i].crc == crcval)
1318			return 1;
1319		pr_debug("Found checksum %X vs module %lX\n",
1320			 crcval, versions[i].crc);
1321		goto bad_version;
1322	}
1323
 1324	/* Broken toolchain. Warn once, then let it go... */
1325	pr_warn_once("%s: no symbol version for %s\n", info->name, symname);
1326	return 1;
1327
1328bad_version:
1329	pr_warn("%s: disagrees about version of symbol %s\n",
1330	       info->name, symname);
1331	return 0;
1332}
1333
1334static inline int check_modstruct_version(const struct load_info *info,
1335					  struct module *mod)
1336{
1337	const s32 *crc;
1338
1339	/*
1340	 * Since this should be found in kernel (which can't be removed), no
1341	 * locking is necessary -- use preempt_disable() to placate lockdep.
1342	 */
1343	preempt_disable();
1344	if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
1345			 &crc, true, false)) {
1346		preempt_enable();
1347		BUG();
1348	}
1349	preempt_enable();
1350	return check_version(info, VMLINUX_SYMBOL_STR(module_layout),
1351			     mod, crc);
1352}
1353
1354/* First part is kernel version, which we ignore if module has crcs. */
1355static inline int same_magic(const char *amagic, const char *bmagic,
1356			     bool has_crcs)
1357{
1358	if (has_crcs) {
1359		amagic += strcspn(amagic, " ");
1360		bmagic += strcspn(bmagic, " ");
1361	}
1362	return strcmp(amagic, bmagic) == 0;
1363}
1364#else
1365static inline int check_version(const struct load_info *info,
1366				const char *symname,
1367				struct module *mod,
1368				const s32 *crc)
1369{
1370	return 1;
1371}
1372
1373static inline int check_modstruct_version(const struct load_info *info,
1374					  struct module *mod)
1375{
1376	return 1;
1377}
1378
1379static inline int same_magic(const char *amagic, const char *bmagic,
1380			     bool has_crcs)
1381{
1382	return strcmp(amagic, bmagic) == 0;
1383}
1384#endif /* CONFIG_MODVERSIONS */
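     /*
      * Editor's note on same_magic(): a vermagic string is the kernel
      * release followed by feature flags, for example
      * "4.17.0 SMP mod_unload modversions ". When CRCs are available the
      * part up to the first space (the release) is skipped via strcspn(),
      * so only the feature flags must match; without CRCs the full string,
      * kernel version included, has to be identical.
      */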
1385
1386/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
1387static const struct kernel_symbol *resolve_symbol(struct module *mod,
1388						  const struct load_info *info,
1389						  const char *name,
1390						  char ownername[])
1391{
1392	struct module *owner;
1393	const struct kernel_symbol *sym;
1394	const s32 *crc;
1395	int err;
1396
1397	/*
1398	 * The module_mutex should not be a heavily contended lock;
1399	 * if we get the occasional sleep here, we'll go an extra iteration
1400	 * in the wait_event_interruptible(), which is harmless.
1401	 */
1402	sched_annotate_sleep();
1403	mutex_lock(&module_mutex);
1404	sym = find_symbol(name, &owner, &crc,
1405			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1406	if (!sym)
1407		goto unlock;
1408
1409	if (!check_version(info, name, mod, crc)) {
1410		sym = ERR_PTR(-EINVAL);
1411		goto getname;
1412	}
1413
1414	err = ref_module(mod, owner);
1415	if (err) {
1416		sym = ERR_PTR(err);
1417		goto getname;
1418	}
1419
1420getname:
 1421	/* We must make a copy under the lock if we failed to get a ref. */
1422	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1423unlock:
1424	mutex_unlock(&module_mutex);
1425	return sym;
1426}
1427
1428static const struct kernel_symbol *
1429resolve_symbol_wait(struct module *mod,
1430		    const struct load_info *info,
1431		    const char *name)
1432{
1433	const struct kernel_symbol *ksym;
1434	char owner[MODULE_NAME_LEN];
1435
1436	if (wait_event_interruptible_timeout(module_wq,
1437			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1438			|| PTR_ERR(ksym) != -EBUSY,
1439					     30 * HZ) <= 0) {
1440		pr_warn("%s: gave up waiting for init of module %s.\n",
1441			mod->name, owner);
1442	}
1443	return ksym;
1444}
1445
1446/*
1447 * /sys/module/foo/sections stuff
1448 * J. Corbet <corbet@lwn.net>
1449 */
1450#ifdef CONFIG_SYSFS
1451
1452#ifdef CONFIG_KALLSYMS
1453static inline bool sect_empty(const Elf_Shdr *sect)
1454{
1455	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1456}
1457
1458struct module_sect_attr {
1459	struct module_attribute mattr;
1460	char *name;
1461	unsigned long address;
1462};
1463
1464struct module_sect_attrs {
1465	struct attribute_group grp;
1466	unsigned int nsections;
1467	struct module_sect_attr attrs[0];
1468};
1469
1470static ssize_t module_sect_show(struct module_attribute *mattr,
1471				struct module_kobject *mk, char *buf)
1472{
1473	struct module_sect_attr *sattr =
1474		container_of(mattr, struct module_sect_attr, mattr);
1475	return sprintf(buf, "0x%px\n", kptr_restrict < 2 ?
1476		       (void *)sattr->address : NULL);
1477}
1478
1479static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1480{
1481	unsigned int section;
1482
1483	for (section = 0; section < sect_attrs->nsections; section++)
1484		kfree(sect_attrs->attrs[section].name);
1485	kfree(sect_attrs);
1486}
1487
1488static void add_sect_attrs(struct module *mod, const struct load_info *info)
1489{
1490	unsigned int nloaded = 0, i, size[2];
1491	struct module_sect_attrs *sect_attrs;
1492	struct module_sect_attr *sattr;
1493	struct attribute **gattr;
1494
1495	/* Count loaded sections and allocate structures */
1496	for (i = 0; i < info->hdr->e_shnum; i++)
1497		if (!sect_empty(&info->sechdrs[i]))
1498			nloaded++;
1499	size[0] = ALIGN(sizeof(*sect_attrs)
1500			+ nloaded * sizeof(sect_attrs->attrs[0]),
1501			sizeof(sect_attrs->grp.attrs[0]));
1502	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
1503	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1504	if (sect_attrs == NULL)
1505		return;
1506
1507	/* Setup section attributes. */
1508	sect_attrs->grp.name = "sections";
1509	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];
1510
1511	sect_attrs->nsections = 0;
1512	sattr = &sect_attrs->attrs[0];
1513	gattr = &sect_attrs->grp.attrs[0];
1514	for (i = 0; i < info->hdr->e_shnum; i++) {
1515		Elf_Shdr *sec = &info->sechdrs[i];
1516		if (sect_empty(sec))
1517			continue;
1518		sattr->address = sec->sh_addr;
1519		sattr->name = kstrdup(info->secstrings + sec->sh_name,
1520					GFP_KERNEL);
1521		if (sattr->name == NULL)
1522			goto out;
1523		sect_attrs->nsections++;
1524		sysfs_attr_init(&sattr->mattr.attr);
1525		sattr->mattr.show = module_sect_show;
1526		sattr->mattr.store = NULL;
1527		sattr->mattr.attr.name = sattr->name;
1528		sattr->mattr.attr.mode = S_IRUSR;
1529		*(gattr++) = &(sattr++)->mattr.attr;
1530	}
1531	*gattr = NULL;
1532
1533	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1534		goto out;
1535
1536	mod->sect_attrs = sect_attrs;
1537	return;
1538  out:
1539	free_sect_attrs(sect_attrs);
1540}
1541
1542static void remove_sect_attrs(struct module *mod)
1543{
1544	if (mod->sect_attrs) {
1545		sysfs_remove_group(&mod->mkobj.kobj,
1546				   &mod->sect_attrs->grp);
1547		/* We are positive that no one is using any sect attrs
1548		 * at this point.  Deallocate immediately. */
1549		free_sect_attrs(mod->sect_attrs);
1550		mod->sect_attrs = NULL;
1551	}
1552}
1553
1554/*
1555 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1556 */
1557
1558struct module_notes_attrs {
1559	struct kobject *dir;
1560	unsigned int notes;
1561	struct bin_attribute attrs[0];
1562};
1563
1564static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1565				 struct bin_attribute *bin_attr,
1566				 char *buf, loff_t pos, size_t count)
1567{
1568	/*
1569	 * The caller checked the pos and count against our size.
1570	 */
1571	memcpy(buf, bin_attr->private + pos, count);
1572	return count;
1573}
1574
1575static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1576			     unsigned int i)
1577{
1578	if (notes_attrs->dir) {
1579		while (i-- > 0)
1580			sysfs_remove_bin_file(notes_attrs->dir,
1581					      &notes_attrs->attrs[i]);
1582		kobject_put(notes_attrs->dir);
1583	}
1584	kfree(notes_attrs);
1585}
1586
1587static void add_notes_attrs(struct module *mod, const struct load_info *info)
1588{
1589	unsigned int notes, loaded, i;
1590	struct module_notes_attrs *notes_attrs;
1591	struct bin_attribute *nattr;
1592
1593	/* failed to create section attributes, so can't create notes */
1594	if (!mod->sect_attrs)
1595		return;
1596
1597	/* Count notes sections and allocate structures.  */
1598	notes = 0;
1599	for (i = 0; i < info->hdr->e_shnum; i++)
1600		if (!sect_empty(&info->sechdrs[i]) &&
1601		    (info->sechdrs[i].sh_type == SHT_NOTE))
1602			++notes;
1603
1604	if (notes == 0)
1605		return;
1606
1607	notes_attrs = kzalloc(sizeof(*notes_attrs)
1608			      + notes * sizeof(notes_attrs->attrs[0]),
1609			      GFP_KERNEL);
1610	if (notes_attrs == NULL)
1611		return;
1612
1613	notes_attrs->notes = notes;
1614	nattr = &notes_attrs->attrs[0];
1615	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1616		if (sect_empty(&info->sechdrs[i]))
1617			continue;
1618		if (info->sechdrs[i].sh_type == SHT_NOTE) {
1619			sysfs_bin_attr_init(nattr);
1620			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
1621			nattr->attr.mode = S_IRUGO;
1622			nattr->size = info->sechdrs[i].sh_size;
1623			nattr->private = (void *) info->sechdrs[i].sh_addr;
1624			nattr->read = module_notes_read;
1625			++nattr;
1626		}
1627		++loaded;
1628	}
1629
1630	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1631	if (!notes_attrs->dir)
1632		goto out;
1633
1634	for (i = 0; i < notes; ++i)
1635		if (sysfs_create_bin_file(notes_attrs->dir,
1636					  &notes_attrs->attrs[i]))
1637			goto out;
1638
1639	mod->notes_attrs = notes_attrs;
1640	return;
1641
1642  out:
1643	free_notes_attrs(notes_attrs, i);
1644}
1645
1646static void remove_notes_attrs(struct module *mod)
1647{
1648	if (mod->notes_attrs)
1649		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1650}
1651
1652#else
1653
1654static inline void add_sect_attrs(struct module *mod,
1655				  const struct load_info *info)
1656{
1657}
1658
1659static inline void remove_sect_attrs(struct module *mod)
1660{
1661}
1662
1663static inline void add_notes_attrs(struct module *mod,
1664				   const struct load_info *info)
1665{
1666}
1667
1668static inline void remove_notes_attrs(struct module *mod)
1669{
1670}
1671#endif /* CONFIG_KALLSYMS */
1672
1673static void del_usage_links(struct module *mod)
1674{
1675#ifdef CONFIG_MODULE_UNLOAD
1676	struct module_use *use;
1677
1678	mutex_lock(&module_mutex);
1679	list_for_each_entry(use, &mod->target_list, target_list)
1680		sysfs_remove_link(use->target->holders_dir, mod->name);
1681	mutex_unlock(&module_mutex);
1682#endif
1683}
1684
1685static int add_usage_links(struct module *mod)
1686{
1687	int ret = 0;
1688#ifdef CONFIG_MODULE_UNLOAD
1689	struct module_use *use;
1690
1691	mutex_lock(&module_mutex);
1692	list_for_each_entry(use, &mod->target_list, target_list) {
1693		ret = sysfs_create_link(use->target->holders_dir,
1694					&mod->mkobj.kobj, mod->name);
1695		if (ret)
1696			break;
1697	}
1698	mutex_unlock(&module_mutex);
1699	if (ret)
1700		del_usage_links(mod);
1701#endif
1702	return ret;
1703}
1704
1705static int module_add_modinfo_attrs(struct module *mod)
1706{
1707	struct module_attribute *attr;
1708	struct module_attribute *temp_attr;
1709	int error = 0;
1710	int i;
1711
1712	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1713					(ARRAY_SIZE(modinfo_attrs) + 1)),
1714					GFP_KERNEL);
1715	if (!mod->modinfo_attrs)
1716		return -ENOMEM;
1717
1718	temp_attr = mod->modinfo_attrs;
1719	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
1720		if (!attr->test || attr->test(mod)) {
1721			memcpy(temp_attr, attr, sizeof(*temp_attr));
1722			sysfs_attr_init(&temp_attr->attr);
1723			error = sysfs_create_file(&mod->mkobj.kobj,
1724					&temp_attr->attr);
1725			++temp_attr;
1726		}
1727	}
1728	return error;
1729}
1730
1731static void module_remove_modinfo_attrs(struct module *mod)
1732{
1733	struct module_attribute *attr;
1734	int i;
1735
1736	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1737		/* pick a field to test for end of list */
1738		if (!attr->attr.name)
1739			break;
1740		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
1741		if (attr->free)
1742			attr->free(mod);
1743	}
1744	kfree(mod->modinfo_attrs);
1745}
1746
1747static void mod_kobject_put(struct module *mod)
1748{
1749	DECLARE_COMPLETION_ONSTACK(c);
1750	mod->mkobj.kobj_completion = &c;
1751	kobject_put(&mod->mkobj.kobj);
1752	wait_for_completion(&c);
1753}
1754
1755static int mod_sysfs_init(struct module *mod)
1756{
1757	int err;
1758	struct kobject *kobj;
1759
1760	if (!module_sysfs_initialized) {
1761		pr_err("%s: module sysfs not initialized\n", mod->name);
1762		err = -EINVAL;
1763		goto out;
1764	}
1765
1766	kobj = kset_find_obj(module_kset, mod->name);
1767	if (kobj) {
1768		pr_err("%s: module is already loaded\n", mod->name);
1769		kobject_put(kobj);
1770		err = -EINVAL;
1771		goto out;
1772	}
1773
1774	mod->mkobj.mod = mod;
1775
1776	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1777	mod->mkobj.kobj.kset = module_kset;
1778	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1779				   "%s", mod->name);
1780	if (err)
1781		mod_kobject_put(mod);
1782
1783	/* delay uevent until full sysfs population */
1784out:
1785	return err;
1786}
1787
1788static int mod_sysfs_setup(struct module *mod,
1789			   const struct load_info *info,
1790			   struct kernel_param *kparam,
1791			   unsigned int num_params)
1792{
1793	int err;
1794
1795	err = mod_sysfs_init(mod);
1796	if (err)
1797		goto out;
1798
1799	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1800	if (!mod->holders_dir) {
1801		err = -ENOMEM;
1802		goto out_unreg;
1803	}
1804
1805	err = module_param_sysfs_setup(mod, kparam, num_params);
1806	if (err)
1807		goto out_unreg_holders;
1808
1809	err = module_add_modinfo_attrs(mod);
1810	if (err)
1811		goto out_unreg_param;
1812
1813	err = add_usage_links(mod);
1814	if (err)
1815		goto out_unreg_modinfo_attrs;
1816
1817	add_sect_attrs(mod, info);
1818	add_notes_attrs(mod, info);
1819
1820	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1821	return 0;
1822
1823out_unreg_modinfo_attrs:
1824	module_remove_modinfo_attrs(mod);
1825out_unreg_param:
1826	module_param_sysfs_remove(mod);
1827out_unreg_holders:
1828	kobject_put(mod->holders_dir);
1829out_unreg:
1830	mod_kobject_put(mod);
1831out:
1832	return err;
1833}
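     /*
      * Editor's summary: after a successful mod_sysfs_setup() the module
      * has /sys/module/<name>/ populated with holders/, its parameters
      * (module_param_sysfs_setup()), the modinfo attributes added above,
      * plus sections/ and notes/ when KALLSYMS is enabled, and a KOBJ_ADD
      * uevent has been emitted.
      */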
1834
1835static void mod_sysfs_fini(struct module *mod)
1836{
1837	remove_notes_attrs(mod);
1838	remove_sect_attrs(mod);
1839	mod_kobject_put(mod);
1840}
1841
1842static void init_param_lock(struct module *mod)
1843{
1844	mutex_init(&mod->param_lock);
1845}
1846#else /* !CONFIG_SYSFS */
1847
1848static int mod_sysfs_setup(struct module *mod,
1849			   const struct load_info *info,
1850			   struct kernel_param *kparam,
1851			   unsigned int num_params)
1852{
1853	return 0;
1854}
1855
1856static void mod_sysfs_fini(struct module *mod)
1857{
1858}
1859
1860static void module_remove_modinfo_attrs(struct module *mod)
1861{
1862}
1863
1864static void del_usage_links(struct module *mod)
1865{
1866}
1867
1868static void init_param_lock(struct module *mod)
1869{
1870}
1871#endif /* CONFIG_SYSFS */
1872
1873static void mod_sysfs_teardown(struct module *mod)
1874{
1875	del_usage_links(mod);
1876	module_remove_modinfo_attrs(mod);
1877	module_param_sysfs_remove(mod);
1878	kobject_put(mod->mkobj.drivers_dir);
1879	kobject_put(mod->holders_dir);
1880	mod_sysfs_fini(mod);
1881}
1882
1883#ifdef CONFIG_STRICT_MODULE_RWX
1884/*
1885 * LKM RO/NX protection: protect module's text/ro-data
1886 * from modification and any data from execution.
1887 *
1888 * General layout of module is:
1889 *          [text] [read-only-data] [ro-after-init] [writable data]
1890 * text_size -----^                ^               ^               ^
1891 * ro_size ------------------------|               |               |
1892 * ro_after_init_size -----------------------------|               |
1893 * size -----------------------------------------------------------|
1894 *
1895 * These values are always page-aligned (as is base)
1896 */
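     /*
      * Editor's worked example (hypothetical sizes, 4 KiB pages): a module
      * with 8 KiB of text, 4 KiB of rodata, 4 KiB of ro-after-init data and
      * 8 KiB of writable data has text_size = 0x2000, ro_size = 0x3000,
      * ro_after_init_size = 0x4000 and size = 0x6000, each region starting
      * at base plus the previous boundary.
      */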
1897static void frob_text(const struct module_layout *layout,
1898		      int (*set_memory)(unsigned long start, int num_pages))
1899{
1900	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1901	BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
1902	set_memory((unsigned long)layout->base,
1903		   layout->text_size >> PAGE_SHIFT);
1904}
1905
1906static void frob_rodata(const struct module_layout *layout,
1907			int (*set_memory)(unsigned long start, int num_pages))
1908{
1909	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1910	BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
1911	BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
1912	set_memory((unsigned long)layout->base + layout->text_size,
1913		   (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
1914}
1915
1916static void frob_ro_after_init(const struct module_layout *layout,
1917				int (*set_memory)(unsigned long start, int num_pages))
1918{
1919	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1920	BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
1921	BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
1922	set_memory((unsigned long)layout->base + layout->ro_size,
1923		   (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
1924}
1925
1926static void frob_writable_data(const struct module_layout *layout,
1927			       int (*set_memory)(unsigned long start, int num_pages))
1928{
1929	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
1930	BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
1931	BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1));
1932	set_memory((unsigned long)layout->base + layout->ro_after_init_size,
1933		   (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
1934}
1935
1936/* livepatching wants to disable read-only so it can frob module. */
1937void module_disable_ro(const struct module *mod)
1938{
1939	if (!rodata_enabled)
1940		return;
1941
1942	frob_text(&mod->core_layout, set_memory_rw);
1943	frob_rodata(&mod->core_layout, set_memory_rw);
1944	frob_ro_after_init(&mod->core_layout, set_memory_rw);
1945	frob_text(&mod->init_layout, set_memory_rw);
1946	frob_rodata(&mod->init_layout, set_memory_rw);
1947}
1948
1949void module_enable_ro(const struct module *mod, bool after_init)
1950{
1951	if (!rodata_enabled)
1952		return;
1953
1954	frob_text(&mod->core_layout, set_memory_ro);
1955	frob_rodata(&mod->core_layout, set_memory_ro);
1956	frob_text(&mod->init_layout, set_memory_ro);
1957	frob_rodata(&mod->init_layout, set_memory_ro);
1958
1959	if (after_init)
1960		frob_ro_after_init(&mod->core_layout, set_memory_ro);
1961}
1962
1963static void module_enable_nx(const struct module *mod)
1964{
1965	frob_rodata(&mod->core_layout, set_memory_nx);
1966	frob_ro_after_init(&mod->core_layout, set_memory_nx);
1967	frob_writable_data(&mod->core_layout, set_memory_nx);
1968	frob_rodata(&mod->init_layout, set_memory_nx);
1969	frob_writable_data(&mod->init_layout, set_memory_nx);
1970}
1971
1972static void module_disable_nx(const struct module *mod)
1973{
1974	frob_rodata(&mod->core_layout, set_memory_x);
1975	frob_ro_after_init(&mod->core_layout, set_memory_x);
1976	frob_writable_data(&mod->core_layout, set_memory_x);
1977	frob_rodata(&mod->init_layout, set_memory_x);
1978	frob_writable_data(&mod->init_layout, set_memory_x);
1979}
1980
1981/* Iterate through all modules and set each module's text as RW */
1982void set_all_modules_text_rw(void)
1983{
1984	struct module *mod;
1985
1986	if (!rodata_enabled)
1987		return;
1988
1989	mutex_lock(&module_mutex);
1990	list_for_each_entry_rcu(mod, &modules, list) {
1991		if (mod->state == MODULE_STATE_UNFORMED)
1992			continue;
1993
1994		frob_text(&mod->core_layout, set_memory_rw);
1995		frob_text(&mod->init_layout, set_memory_rw);
1996	}
1997	mutex_unlock(&module_mutex);
1998}
1999
2000/* Iterate through all modules and set each module's text as RO */
2001void set_all_modules_text_ro(void)
2002{
2003	struct module *mod;
2004
2005	if (!rodata_enabled)
2006		return;
2007
2008	mutex_lock(&module_mutex);
2009	list_for_each_entry_rcu(mod, &modules, list) {
2010		/*
2011		 * Ignore modules that are going away: their ro protection
2012		 * may already have been disabled, and re-enabling it here
2013		 * would trigger protection faults at module deallocation.
2014		 */
2015		if (mod->state == MODULE_STATE_UNFORMED ||
2016			mod->state == MODULE_STATE_GOING)
2017			continue;
2018
2019		frob_text(&mod->core_layout, set_memory_ro);
2020		frob_text(&mod->init_layout, set_memory_ro);
2021	}
2022	mutex_unlock(&module_mutex);
2023}
2024
2025static void disable_ro_nx(const struct module_layout *layout)
2026{
2027	if (rodata_enabled) {
2028		frob_text(layout, set_memory_rw);
2029		frob_rodata(layout, set_memory_rw);
2030		frob_ro_after_init(layout, set_memory_rw);
2031	}
2032	frob_rodata(layout, set_memory_x);
2033	frob_ro_after_init(layout, set_memory_x);
2034	frob_writable_data(layout, set_memory_x);
2035}
2036
2037#else
2038static void disable_ro_nx(const struct module_layout *layout) { }
2039static void module_enable_nx(const struct module *mod) { }
2040static void module_disable_nx(const struct module *mod) { }
2041#endif
2042
2043#ifdef CONFIG_LIVEPATCH
2044/*
2045 * Persist Elf information about a module. Copy the Elf header,
2046 * section header table, section string table, and symtab section
2047 * index from info to mod->klp_info.
2048 */
2049static int copy_module_elf(struct module *mod, struct load_info *info)
2050{
2051	unsigned int size, symndx;
2052	int ret;
2053
2054	size = sizeof(*mod->klp_info);
2055	mod->klp_info = kmalloc(size, GFP_KERNEL);
2056	if (mod->klp_info == NULL)
2057		return -ENOMEM;
2058
2059	/* Elf header */
2060	size = sizeof(mod->klp_info->hdr);
2061	memcpy(&mod->klp_info->hdr, info->hdr, size);
2062
2063	/* Elf section header table */
2064	size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
2065	mod->klp_info->sechdrs = kmalloc(size, GFP_KERNEL);
2066	if (mod->klp_info->sechdrs == NULL) {
2067		ret = -ENOMEM;
2068		goto free_info;
2069	}
2070	memcpy(mod->klp_info->sechdrs, info->sechdrs, size);
2071
2072	/* Elf section name string table */
2073	size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
2074	mod->klp_info->secstrings = kmalloc(size, GFP_KERNEL);
2075	if (mod->klp_info->secstrings == NULL) {
2076		ret = -ENOMEM;
2077		goto free_sechdrs;
2078	}
2079	memcpy(mod->klp_info->secstrings, info->secstrings, size);
2080
2081	/* Elf symbol section index */
2082	symndx = info->index.sym;
2083	mod->klp_info->symndx = symndx;
2084
2085	/*
2086	 * For livepatch modules, core_kallsyms.symtab is a complete
2087	 * copy of the original symbol table. Adjust sh_addr to point
2088	 * to core_kallsyms.symtab since the copy of the symtab in module
2089	 * init memory is freed at the end of do_init_module().
2090	 */
2091	mod->klp_info->sechdrs[symndx].sh_addr = \
2092		(unsigned long) mod->core_kallsyms.symtab;
2093
2094	return 0;
2095
2096free_sechdrs:
2097	kfree(mod->klp_info->sechdrs);
2098free_info:
2099	kfree(mod->klp_info);
2100	return ret;
2101}
2102
2103static void free_module_elf(struct module *mod)
2104{
2105	kfree(mod->klp_info->sechdrs);
2106	kfree(mod->klp_info->secstrings);
2107	kfree(mod->klp_info);
2108}
2109#else /* !CONFIG_LIVEPATCH */
2110static int copy_module_elf(struct module *mod, struct load_info *info)
2111{
2112	return 0;
2113}
2114
2115static void free_module_elf(struct module *mod)
2116{
2117}
2118#endif /* CONFIG_LIVEPATCH */
2119
2120void __weak module_memfree(void *module_region)
2121{
2122	vfree(module_region);
2123}
2124
2125void __weak module_arch_cleanup(struct module *mod)
2126{
2127}
2128
2129void __weak module_arch_freeing_init(struct module *mod)
2130{
2131}
2132
2133/* Free a module, remove from lists, etc. */
2134static void free_module(struct module *mod)
2135{
2136	trace_module_free(mod);
2137
2138	mod_sysfs_teardown(mod);
2139
2140	/* We leave it in the list to prevent duplicate loads, but make sure
2141	 * that no one uses it while it's being deconstructed. */
2142	mutex_lock(&module_mutex);
2143	mod->state = MODULE_STATE_UNFORMED;
2144	mutex_unlock(&module_mutex);
2145
2146	/* Remove dynamic debug info */
2147	ddebug_remove_module(mod->name);
2148
2149	/* Arch-specific cleanup. */
2150	module_arch_cleanup(mod);
2151
2152	/* Module unload stuff */
2153	module_unload_free(mod);
2154
2155	/* Free any allocated parameters. */
2156	destroy_params(mod->kp, mod->num_kp);
2157
2158	if (is_livepatch_module(mod))
2159		free_module_elf(mod);
2160
2161	/* Now we can delete it from the lists */
2162	mutex_lock(&module_mutex);
2163	/* Unlink carefully: kallsyms could be walking list. */
2164	list_del_rcu(&mod->list);
2165	mod_tree_remove(mod);
2166	/* Remove this module from bug list, this uses list_del_rcu */
2167	module_bug_cleanup(mod);
2168	/* Wait for RCU-sched synchronization before releasing mod->list and buglist. */
2169	synchronize_sched();
2170	mutex_unlock(&module_mutex);
2171
2172	/* This may be empty, but that's OK */
2173	disable_ro_nx(&mod->init_layout);
2174	module_arch_freeing_init(mod);
2175	module_memfree(mod->init_layout.base);
2176	kfree(mod->args);
2177	percpu_modfree(mod);
2178
2179	/* Free lock-classes; relies on the preceding sync_rcu(). */
2180	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
2181
2182	/* Finally, free the core (containing the module structure) */
2183	disable_ro_nx(&mod->core_layout);
2184	module_memfree(mod->core_layout.base);
2185}
2186
2187void *__symbol_get(const char *symbol)
2188{
2189	struct module *owner;
2190	const struct kernel_symbol *sym;
2191
2192	preempt_disable();
2193	sym = find_symbol(symbol, &owner, NULL, true, true);
2194	if (sym && strong_try_module_get(owner))
2195		sym = NULL;
2196	preempt_enable();
2197
2198	return sym ? (void *)sym->value : NULL;
2199}
2200EXPORT_SYMBOL_GPL(__symbol_get);
2201
2202/*
2203 * Ensure that an exported symbol [global namespace] does not already exist
2204 * in the kernel or in some other module's exported symbol table.
2205 *
2206 * You must hold the module_mutex.
2207 */
2208static int verify_export_symbols(struct module *mod)
2209{
2210	unsigned int i;
2211	struct module *owner;
2212	const struct kernel_symbol *s;
2213	struct {
2214		const struct kernel_symbol *sym;
2215		unsigned int num;
2216	} arr[] = {
2217		{ mod->syms, mod->num_syms },
2218		{ mod->gpl_syms, mod->num_gpl_syms },
2219		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
2220#ifdef CONFIG_UNUSED_SYMBOLS
2221		{ mod->unused_syms, mod->num_unused_syms },
2222		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
2223#endif
2224	};
2225
2226	for (i = 0; i < ARRAY_SIZE(arr); i++) {
2227		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
2228			if (find_symbol(s->name, &owner, NULL, true, false)) {
2229				pr_err("%s: exports duplicate symbol %s"
2230				       " (owned by %s)\n",
2231				       mod->name, s->name, module_name(owner));
2232				return -ENOEXEC;
2233			}
2234		}
2235	}
2236	return 0;
2237}
2238
2239/* Change all symbols so that st_value encodes the pointer directly. */
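/*
 * Undefined symbols are resolved against the kernel's and other modules'
 * exports, per-cpu symbols are rebased into this module's per-cpu area,
 * and other defined symbols get their section's final address added in.
 */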
2240static int simplify_symbols(struct module *mod, const struct load_info *info)
2241{
2242	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2243	Elf_Sym *sym = (void *)symsec->sh_addr;
2244	unsigned long secbase;
2245	unsigned int i;
2246	int ret = 0;
2247	const struct kernel_symbol *ksym;
2248
2249	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
2250		const char *name = info->strtab + sym[i].st_name;
2251
2252		switch (sym[i].st_shndx) {
2253		case SHN_COMMON:
2254			/* Ignore common symbols */
2255			if (!strncmp(name, "__gnu_lto", 9))
2256				break;
2257
2258			/* We compiled with -fno-common.  These are not
2259			   supposed to happen.  */
2260			pr_debug("Common symbol: %s\n", name);
2261			pr_warn("%s: please compile with -fno-common\n",
2262			       mod->name);
2263			ret = -ENOEXEC;
2264			break;
2265
2266		case SHN_ABS:
2267			/* Don't need to do anything */
2268			pr_debug("Absolute symbol: 0x%08lx\n",
2269			       (long)sym[i].st_value);
2270			break;
2271
2272		case SHN_LIVEPATCH:
2273			/* Livepatch symbols are resolved by livepatch */
2274			break;
2275
2276		case SHN_UNDEF:
2277			ksym = resolve_symbol_wait(mod, info, name);
2278			/* Ok if resolved.  */
2279			if (ksym && !IS_ERR(ksym)) {
2280				sym[i].st_value = ksym->value;
2281				break;
2282			}
2283
2284			/* Ok if weak.  */
2285			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
2286				break;
2287
2288			pr_warn("%s: Unknown symbol %s (err %li)\n",
2289				mod->name, name, PTR_ERR(ksym));
2290			ret = PTR_ERR(ksym) ?: -ENOENT;
2291			break;
2292
2293		default:
2294			/* Divert to percpu allocation if a percpu var. */
2295			if (sym[i].st_shndx == info->index.pcpu)
2296				secbase = (unsigned long)mod_percpu(mod);
2297			else
2298				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
2299			sym[i].st_value += secbase;
2300			break;
2301		}
2302	}
2303
2304	return ret;
2305}
2306
2307static int apply_relocations(struct module *mod, const struct load_info *info)
2308{
2309	unsigned int i;
2310	int err = 0;
2311
2312	/* Now do relocations. */
2313	for (i = 1; i < info->hdr->e_shnum; i++) {
2314		unsigned int infosec = info->sechdrs[i].sh_info;
2315
2316		/* Not a valid relocation section? */
2317		if (infosec >= info->hdr->e_shnum)
2318			continue;
2319
2320		/* Don't bother with non-allocated sections */
2321		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2322			continue;
2323
2324		/* Livepatch relocation sections are applied by livepatch */
2325		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
2326			continue;
2327
2328		if (info->sechdrs[i].sh_type == SHT_REL)
2329			err = apply_relocate(info->sechdrs, info->strtab,
2330					     info->index.sym, i, mod);
2331		else if (info->sechdrs[i].sh_type == SHT_RELA)
2332			err = apply_relocate_add(info->sechdrs, info->strtab,
2333						 info->index.sym, i, mod);
2334		if (err < 0)
2335			break;
2336	}
2337	return err;
2338}
2339
2340/* Additional bytes needed by arch in front of individual sections */
2341unsigned int __weak arch_mod_section_prepend(struct module *mod,
2342					     unsigned int section)
2343{
2344	/* default implementation just returns zero */
2345	return 0;
2346}
2347
2348/* Update size with this section: return offset. */
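/*
 * For example, assuming arch_mod_section_prepend() returns 0: with
 * *size == 0x1234 and sh_addralign == 16, the section is placed at
 * offset 0x1240 and *size becomes 0x1240 + sh_size.
 */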
2349static long get_offset(struct module *mod, unsigned int *size,
2350		       Elf_Shdr *sechdr, unsigned int section)
2351{
2352	long ret;
2353
2354	*size += arch_mod_section_prepend(mod, section);
2355	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2356	*size = ret + sechdr->sh_size;
2357	return ret;
2358}
2359
2360/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2361   might -- code, read-only data, read-write data, small data.  Tally
2362   sizes, and place the offsets into sh_entsize fields: high bit means it
2363   belongs in init. */
2364static void layout_sections(struct module *mod, struct load_info *info)
2365{
2366	static unsigned long const masks[][2] = {
2367		/* NOTE: all executable code must be the first section
2368		 * in this array; otherwise modify the text_size
2369		 * finder in the two loops below */
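		/*
		 * Each pair is { flags a section must have, flags it must not
		 * have }: executable text, read-only data, ro-after-init data,
		 * writable data, then arch "small" sections.
		 */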
2370		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2371		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2372		{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
2373		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2374		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2375	};
2376	unsigned int m, i;
2377
2378	for (i = 0; i < info->hdr->e_shnum; i++)
2379		info->sechdrs[i].sh_entsize = ~0UL;
2380
2381	pr_debug("Core section allocation order:\n");
2382	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2383		for (i = 0; i < info->hdr->e_shnum; ++i) {
2384			Elf_Shdr *s = &info->sechdrs[i];
2385			const char *sname = info->secstrings + s->sh_name;
2386
2387			if ((s->sh_flags & masks[m][0]) != masks[m][0]
2388			    || (s->sh_flags & masks[m][1])
2389			    || s->sh_entsize != ~0UL
2390			    || strstarts(sname, ".init"))
2391				continue;
2392			s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
2393			pr_debug("\t%s\n", sname);
2394		}
2395		switch (m) {
2396		case 0: /* executable */
2397			mod->core_layout.size = debug_align(mod->core_layout.size);
2398			mod->core_layout.text_size = mod->core_layout.size;
2399			break;
2400		case 1: /* RO: text and ro-data */
2401			mod->core_layout.size = debug_align(mod->core_layout.size);
2402			mod->core_layout.ro_size = mod->core_layout.size;
2403			break;
2404		case 2: /* RO after init */
2405			mod->core_layout.size = debug_align(mod->core_layout.size);
2406			mod->core_layout.ro_after_init_size = mod->core_layout.size;
2407			break;
2408		case 4: /* whole core */
2409			mod->core_layout.size = debug_align(mod->core_layout.size);
2410			break;
2411		}
2412	}
2413
2414	pr_debug("Init section allocation order:\n");
2415	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2416		for (i = 0; i < info->hdr->e_shnum; ++i) {
2417			Elf_Shdr *s = &info->sechdrs[i];
2418			const char *sname = info->secstrings + s->sh_name;
2419
2420			if ((s->sh_flags & masks[m][0]) != masks[m][0]
2421			    || (s->sh_flags & masks[m][1])
2422			    || s->sh_entsize != ~0UL
2423			    || !strstarts(sname, ".init"))
2424				continue;
2425			s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
2426					 | INIT_OFFSET_MASK);
2427			pr_debug("\t%s\n", sname);
2428		}
2429		switch (m) {
2430		case 0: /* executable */
2431			mod->init_layout.size = debug_align(mod->init_layout.size);
2432			mod->init_layout.text_size = mod->init_layout.size;
2433			break;
2434		case 1: /* RO: text and ro-data */
2435			mod->init_layout.size = debug_align(mod->init_layout.size);
2436			mod->init_layout.ro_size = mod->init_layout.size;
2437			break;
2438		case 2:
2439			/*
2440			 * RO after init doesn't apply to init_layout (only
2441			 * core_layout), so it just takes the value of ro_size.
2442			 */
2443			mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
2444			break;
2445		case 4: /* whole init */
2446			mod->init_layout.size = debug_align(mod->init_layout.size);
2447			break;
2448		}
2449	}
2450}
2451
2452static void set_license(struct module *mod, const char *license)
2453{
2454	if (!license)
2455		license = "unspecified";
2456
2457	if (!license_is_gpl_compatible(license)) {
2458		if (!test_taint(TAINT_PROPRIETARY_MODULE))
2459			pr_warn("%s: module license '%s' taints kernel.\n",
2460				mod->name, license);
2461		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2462				 LOCKDEP_NOW_UNRELIABLE);
2463	}
2464}
2465
2466/* Parse tag=value strings from .modinfo section */
2467static char *next_string(char *string, unsigned long *secsize)
2468{
2469	/* Skip non-zero chars */
2470	while (string[0]) {
2471		string++;
2472		if ((*secsize)-- <= 1)
2473			return NULL;
2474	}
2475
2476	/* Skip any zero padding. */
2477	while (!string[0]) {
2478		string++;
2479		if ((*secsize)-- <= 1)
2480			return NULL;
2481	}
2482	return string;
2483}
2484
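/*
 * Find the value for a given tag in .modinfo, which is a sequence of
 * NUL-terminated "tag=value" strings (e.g. "license=GPL").
 */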
2485static char *get_modinfo(struct load_info *info, const char *tag)
2486{
2487	char *p;
2488	unsigned int taglen = strlen(tag);
2489	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2490	unsigned long size = infosec->sh_size;
2491
2492	for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2493		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2494			return p + taglen + 1;
2495	}
2496	return NULL;
2497}
2498
2499static void setup_modinfo(struct module *mod, struct load_info *info)
2500{
2501	struct module_attribute *attr;
2502	int i;
2503
2504	for (i = 0; (attr = modinfo_attrs[i]); i++) {
2505		if (attr->setup)
2506			attr->setup(mod, get_modinfo(info, attr->attr.name));
2507	}
2508}
2509
2510static void free_modinfo(struct module *mod)
2511{
2512	struct module_attribute *attr;
2513	int i;
2514
2515	for (i = 0; (attr = modinfo_attrs[i]); i++) {
2516		if (attr->free)
2517			attr->free(mod);
2518	}
2519}
2520
2521#ifdef CONFIG_KALLSYMS
2522
2523/* lookup symbol in given range of kernel_symbols */
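/* Export tables are sorted by name, which is what makes bsearch() valid here. */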
2524static const struct kernel_symbol *lookup_symbol(const char *name,
2525	const struct kernel_symbol *start,
2526	const struct kernel_symbol *stop)
2527{
2528	return bsearch(name, start, stop - start,
2529			sizeof(struct kernel_symbol), cmp_name);
2530}
2531
2532static int is_exported(const char *name, unsigned long value,
2533		       const struct module *mod)
2534{
2535	const struct kernel_symbol *ks;
2536	if (!mod)
2537		ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2538	else
2539		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2540	return ks != NULL && ks->value == value;
2541}
2542
2543/* As per nm */
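/*
 * Codes follow nm(1): 'v'/'w' weak, 'U' undefined, 'a' absolute, 't' text,
 * 'r' read-only data, 'd' data, 'g'/'s' small data/bss, 'b' bss,
 * 'n' debug, '?' anything else.
 */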
2544static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2545{
2546	const Elf_Shdr *sechdrs = info->sechdrs;
2547
2548	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2549		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2550			return 'v';
2551		else
2552			return 'w';
2553	}
2554	if (sym->st_shndx == SHN_UNDEF)
2555		return 'U';
2556	if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
2557		return 'a';
2558	if (sym->st_shndx >= SHN_LORESERVE)
2559		return '?';
2560	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2561		return 't';
2562	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2563	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2564		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2565			return 'r';
2566		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2567			return 'g';
2568		else
2569			return 'd';
2570	}
2571	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2572		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2573			return 's';
2574		else
2575			return 'b';
2576	}
2577	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2578		      ".debug")) {
2579		return 'n';
2580	}
2581	return '?';
2582}
2583
2584static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2585			unsigned int shnum, unsigned int pcpundx)
2586{
2587	const Elf_Shdr *sec;
2588
2589	if (src->st_shndx == SHN_UNDEF
2590	    || src->st_shndx >= shnum
2591	    || !src->st_name)
2592		return false;
2593
2594#ifdef CONFIG_KALLSYMS_ALL
2595	if (src->st_shndx == pcpundx)
2596		return true;
2597#endif
2598
2599	sec = sechdrs + src->st_shndx;
2600	if (!(sec->sh_flags & SHF_ALLOC)
2601#ifndef CONFIG_KALLSYMS_ALL
2602	    || !(sec->sh_flags & SHF_EXECINSTR)
2603#endif
2604	    || (sec->sh_entsize & INIT_OFFSET_MASK))
2605		return false;
2606
2607	return true;
2608}
2609
2610/*
2611 * We only allocate and copy the strings needed by the parts of symtab
2612 * we keep.  This is simple, but has the effect of making multiple
2613 * copies of duplicates.  We could be more sophisticated, see
2614 * linux-kernel thread starting with
2615 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2616 */
2617static void layout_symtab(struct module *mod, struct load_info *info)
2618{
2619	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2620	Elf_Shdr *strsect = info->sechdrs + info->index.str;
2621	const Elf_Sym *src;
2622	unsigned int i, nsrc, ndst, strtab_size = 0;
2623
2624	/* Put symbol section at end of init part of module. */
2625	symsect->sh_flags |= SHF_ALLOC;
2626	symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect,
2627					 info->index.sym) | INIT_OFFSET_MASK;
2628	pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2629
2630	src = (void *)info->hdr + symsect->sh_offset;
2631	nsrc = symsect->sh_size / sizeof(*src);
2632
2633	/* Compute total space required for the core symbols' strtab. */
2634	for (ndst = i = 0; i < nsrc; i++) {
2635		if (i == 0 || is_livepatch_module(mod) ||
2636		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2637				   info->index.pcpu)) {
2638			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2639			ndst++;
2640		}
2641	}
2642
2643	/* Append room for core symbols at end of core part. */
2644	info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
2645	info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
2646	mod->core_layout.size += strtab_size;
2647	mod->core_layout.size = debug_align(mod->core_layout.size);
2648
2649	/* Put string table section at end of init part of module. */
2650	strsect->sh_flags |= SHF_ALLOC;
2651	strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
2652					 info->index.str) | INIT_OFFSET_MASK;
2653	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2654
2655	/* We'll tack temporary mod_kallsyms on the end. */
2656	mod->init_layout.size = ALIGN(mod->init_layout.size,
2657				      __alignof__(struct mod_kallsyms));
2658	info->mod_kallsyms_init_off = mod->init_layout.size;
2659	mod->init_layout.size += sizeof(struct mod_kallsyms);
2660	mod->init_layout.size = debug_align(mod->init_layout.size);
2661}
2662
2663/*
2664 * We use the full symtab and strtab which layout_symtab arranged to
2665 * be appended to the init section.  Later we switch to the cut-down
2666 * core-only ones.
2667 */
2668static void add_kallsyms(struct module *mod, const struct load_info *info)
2669{
2670	unsigned int i, ndst;
2671	const Elf_Sym *src;
2672	Elf_Sym *dst;
2673	char *s;
2674	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2675
2676	/* Set up to point into init section. */
2677	mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
2678
2679	mod->kallsyms->symtab = (void *)symsec->sh_addr;
2680	mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2681	/* Make sure we get permanent strtab: don't use info->strtab. */
2682	mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2683
2684	/* Set types up while we still have access to sections. */
2685	for (i = 0; i < mod->kallsyms->num_symtab; i++)
2686		mod->kallsyms->symtab[i].st_info
2687			= elf_type(&mod->kallsyms->symtab[i], info);
2688
2689	/* Now populate the cut down core kallsyms for after init. */
2690	mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
2691	mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
2692	src = mod->kallsyms->symtab;
2693	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2694		if (i == 0 || is_livepatch_module(mod) ||
2695		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2696				   info->index.pcpu)) {
2697			dst[ndst] = src[i];
2698			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
2699			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
2700				     KSYM_NAME_LEN) + 1;
2701		}
2702	}
2703	mod->core_kallsyms.num_symtab = ndst;
2704}
2705#else
2706static inline void layout_symtab(struct module *mod, struct load_info *info)
2707{
2708}
2709
2710static void add_kallsyms(struct module *mod, const struct load_info *info)
2711{
2712}
2713#endif /* CONFIG_KALLSYMS */
2714
2715static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num)
2716{
2717	if (!debug)
2718		return;
2719#ifdef CONFIG_DYNAMIC_DEBUG
2720	if (ddebug_add_module(debug, num, mod->name))
2721		pr_err("dynamic debug error adding module: %s\n",
2722			debug->modname);
2723#endif
2724}
2725
2726static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
2727{
2728	if (debug)
2729		ddebug_remove_module(mod->name);
2730}
2731
2732void * __weak module_alloc(unsigned long size)
2733{
2734	return vmalloc_exec(size);
2735}
2736
2737#ifdef CONFIG_DEBUG_KMEMLEAK
2738static void kmemleak_load_module(const struct module *mod,
2739				 const struct load_info *info)
2740{
2741	unsigned int i;
2742
2743	/* only scan the sections containing data */
2744	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2745
2746	for (i = 1; i < info->hdr->e_shnum; i++) {
2747		/* Scan all writable sections that are not executable */
2748		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2749		    !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2750		    (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2751			continue;
2752
2753		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2754				   info->sechdrs[i].sh_size, GFP_KERNEL);
2755	}
2756}
2757#else
2758static inline void kmemleak_load_module(const struct module *mod,
2759					const struct load_info *info)
2760{
2761}
2762#endif
2763
2764#ifdef CONFIG_MODULE_SIG
2765static int module_sig_check(struct load_info *info, int flags)
2766{
2767	int err = -ENOKEY;
2768	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2769	const void *mod = info->hdr;
2770
2771	/*
2772	 * Require flags == 0, as a module with version information
2773	 * removed is no longer the module that was signed
2774	 */
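	/*
	 * A signed module has the signature blob plus the marker string
	 * MODULE_SIG_STRING appended at the very end of the file: look for
	 * the marker and, if present, strip it and let mod_verify_sig()
	 * validate what precedes it.
	 */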
2775	if (flags == 0 &&
2776	    info->len > markerlen &&
2777	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2778		/* We truncate the module to discard the signature */
2779		info->len -= markerlen;
2780		err = mod_verify_sig(mod, &info->len);
2781	}
2782
2783	if (!err) {
2784		info->sig_ok = true;
2785		return 0;
2786	}
2787
2788	/* Not having a signature is only an error if we're strict. */
2789	if (err == -ENOKEY && !sig_enforce)
2790		err = 0;
2791
2792	return err;
2793}
2794#else /* !CONFIG_MODULE_SIG */
2795static int module_sig_check(struct load_info *info, int flags)
2796{
2797	return 0;
2798}
2799#endif /* !CONFIG_MODULE_SIG */
2800
2801/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2802static int elf_header_check(struct load_info *info)
2803{
2804	if (info->len < sizeof(*(info->hdr)))
2805		return -ENOEXEC;
2806
2807	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2808	    || info->hdr->e_type != ET_REL
2809	    || !elf_check_arch(info->hdr)
2810	    || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2811		return -ENOEXEC;
2812
2813	if (info->hdr->e_shoff >= info->len
2814	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2815		info->len - info->hdr->e_shoff))
2816		return -ENOEXEC;
2817
2818	return 0;
2819}
2820
2821#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
2822
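/*
 * Copy the user-supplied module image in COPY_CHUNK_SIZE pieces, with a
 * cond_resched() between chunks so a large .ko doesn't hog the CPU.
 */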
2823static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
2824{
2825	do {
2826		unsigned long n = min(len, COPY_CHUNK_SIZE);
2827
2828		if (copy_from_user(dst, usrc, n) != 0)
2829			return -EFAULT;
2830		cond_resched();
2831		dst += n;
2832		usrc += n;
2833		len -= n;
2834	} while (len);
2835	return 0;
2836}
2837
2838#ifdef CONFIG_LIVEPATCH
2839static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2840{
2841	if (get_modinfo(info, "livepatch")) {
2842		mod->klp = true;
2843		add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
2844		pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
2845			       mod->name);
2846	}
2847
2848	return 0;
2849}
2850#else /* !CONFIG_LIVEPATCH */
2851static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2852{
2853	if (get_modinfo(info, "livepatch")) {
2854		pr_err("%s: module is marked as livepatch module, but livepatch support is disabled\n",
2855		       mod->name);
2856		return -ENOEXEC;
2857	}
2858
2859	return 0;
2860}
2861#endif /* CONFIG_LIVEPATCH */
2862
2863static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
2864{
2865	if (retpoline_module_ok(get_modinfo(info, "retpoline")))
2866		return;
2867
2868	pr_warn("%s: loading module not compiled with retpoline compiler.\n",
2869		mod->name);
2870}
2871
2872/* Sets info->hdr and info->len. */
2873static int copy_module_from_user(const void __user *umod, unsigned long len,
2874				  struct load_info *info)
2875{
2876	int err;
2877
2878	info->len = len;
2879	if (info->len < sizeof(*(info->hdr)))
2880		return -ENOEXEC;
2881
2882	err = security_kernel_read_file(NULL, READING_MODULE);
2883	if (err)
2884		return err;
2885
2886	/* Suck in entire file: we'll want most of it. */
2887	info->hdr = __vmalloc(info->len,
2888			GFP_KERNEL | __GFP_NOWARN, PAGE_KERNEL);
2889	if (!info->hdr)
2890		return -ENOMEM;
2891
2892	if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
2893		vfree(info->hdr);
2894		return -EFAULT;
2895	}
2896
2897	return 0;
2898}
2899
2900static void free_copy(struct load_info *info)
2901{
2902	vfree(info->hdr);
2903}
2904
2905static int rewrite_section_headers(struct load_info *info, int flags)
2906{
2907	unsigned int i;
2908
2909	/* This should always be true, but let's be sure. */
2910	info->sechdrs[0].sh_addr = 0;
2911
2912	for (i = 1; i < info->hdr->e_shnum; i++) {
2913		Elf_Shdr *shdr = &info->sechdrs[i];
2914		if (shdr->sh_type != SHT_NOBITS
2915		    && info->len < shdr->sh_offset + shdr->sh_size) {
2916			pr_err("Module len %lu truncated\n", info->len);
2917			return -ENOEXEC;
2918		}
2919
2920		/* Mark each section's sh_addr with its address in the
2921		   temporary image. */
2922		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2923
2924#ifndef CONFIG_MODULE_UNLOAD
2925		/* Don't load .exit sections */
2926		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2927			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2928#endif
2929	}
2930
2931	/* Track but don't keep modinfo and version sections. */
2932	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2933		info->index.vers = 0; /* Pretend no __versions section! */
2934	else
2935		info->index.vers = find_sec(info, "__versions");
2936	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2937
2938	info->index.info = find_sec(info, ".modinfo");
2939	if (!info->index.info)
2940		info->name = "(missing .modinfo section)";
2941	else
2942		info->name = get_modinfo(info, "name");
2943	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2944
2945	return 0;
2946}
2947
2948/*
2949 * Set up our basic convenience variables (pointers to section headers,
2950 * search for module section index etc), and do some basic section
2951 * verification.
2952 *
2953 * Return the temporary module pointer (we'll replace it with the final
2954 * one when we move the module sections around).
2955 */
2956static struct module *setup_load_info(struct load_info *info, int flags)
2957{
2958	unsigned int i;
2959	int err;
2960	struct module *mod;
2961
2962	/* Set up the convenience variables */
2963	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2964	info->secstrings = (void *)info->hdr
2965		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2966
2967	err = rewrite_section_headers(info, flags);
2968	if (err)
2969		return ERR_PTR(err);
2970
2971	/* Find internal symbols and strings. */
2972	for (i = 1; i < info->hdr->e_shnum; i++) {
2973		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2974			info->index.sym = i;
2975			info->index.str = info->sechdrs[i].sh_link;
2976			info->strtab = (char *)info->hdr
2977				+ info->sechdrs[info->index.str].sh_offset;
2978			break;
2979		}
2980	}
2981
2982	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2983	if (!info->index.mod) {
2984		pr_warn("%s: No module found in object\n",
2985			info->name ?: "(missing .modinfo name field)");
2986		return ERR_PTR(-ENOEXEC);
2987	}
2988	/* This is temporary: point mod into copy of data. */
2989	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2990
2991	/*
2992	 * If we didn't load the .modinfo 'name' field, fall back to
2993	 * on-disk struct mod 'name' field.
2994	 */
2995	if (!info->name)
2996		info->name = mod->name;
2997
2998	if (info->index.sym == 0) {
2999		pr_warn("%s: module has no symbols (stripped?)\n", info->name);
3000		return ERR_PTR(-ENOEXEC);
3001	}
3002
3003	info->index.pcpu = find_pcpusec(info);
3004
3005	/* Check module struct version now, before we try to use module. */
3006	if (!check_modstruct_version(info, mod))
3007		return ERR_PTR(-ENOEXEC);
3008
3009	return mod;
3010}
3011
3012static int check_modinfo(struct module *mod, struct load_info *info, int flags)
3013{
3014	const char *modmagic = get_modinfo(info, "vermagic");
3015	int err;
3016
3017	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
3018		modmagic = NULL;
3019
3020	/* This is allowed: modprobe --force will invalidate it. */
3021	if (!modmagic) {
3022		err = try_to_force_load(mod, "bad vermagic");
3023		if (err)
3024			return err;
3025	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
3026		pr_err("%s: version magic '%s' should be '%s'\n",
3027		       info->name, modmagic, vermagic);
3028		return -ENOEXEC;
3029	}
3030
3031	if (!get_modinfo(info, "intree")) {
3032		if (!test_taint(TAINT_OOT_MODULE))
3033			pr_warn("%s: loading out-of-tree module taints kernel.\n",
3034				mod->name);
3035		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
3036	}
3037
3038	check_modinfo_retpoline(mod, info);
3039
3040	if (get_modinfo(info, "staging")) {
3041		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
3042		pr_warn("%s: module is from the staging directory, the quality "
3043			"is unknown, you have been warned.\n", mod->name);
3044	}
3045
3046	err = check_modinfo_livepatch(mod, info);
3047	if (err)
3048		return err;
3049
3050	/* Set up license info based on the info section */
3051	set_license(mod, get_modinfo(info, "license"));
3052
3053	return 0;
3054}
3055
3056static int find_module_sections(struct module *mod, struct load_info *info)
3057{
3058	mod->kp = section_objs(info, "__param",
3059			       sizeof(*mod->kp), &mod->num_kp);
3060	mod->syms = section_objs(info, "__ksymtab",
3061				 sizeof(*mod->syms), &mod->num_syms);
3062	mod->crcs = section_addr(info, "__kcrctab");
3063	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
3064				     sizeof(*mod->gpl_syms),
3065				     &mod->num_gpl_syms);
3066	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
3067	mod->gpl_future_syms = section_objs(info,
3068					    "__ksymtab_gpl_future",
3069					    sizeof(*mod->gpl_future_syms),
3070					    &mod->num_gpl_future_syms);
3071	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
3072
3073#ifdef CONFIG_UNUSED_SYMBOLS
3074	mod->unused_syms = section_objs(info, "__ksymtab_unused",
3075					sizeof(*mod->unused_syms),
3076					&mod->num_unused_syms);
3077	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
3078	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
3079					    sizeof(*mod->unused_gpl_syms),
3080					    &mod->num_unused_gpl_syms);
3081	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
3082#endif
3083#ifdef CONFIG_CONSTRUCTORS
3084	mod->ctors = section_objs(info, ".ctors",
3085				  sizeof(*mod->ctors), &mod->num_ctors);
3086	if (!mod->ctors)
3087		mod->ctors = section_objs(info, ".init_array",
3088				sizeof(*mod->ctors), &mod->num_ctors);
3089	else if (find_sec(info, ".init_array")) {
3090		/*
3091		 * This shouldn't happen when the same compiler and binutils
3092		 * build all parts of the module.
3093		 */
3094		pr_warn("%s: has both .ctors and .init_array.\n",
3095		       mod->name);
3096		return -EINVAL;
3097	}
3098#endif
3099
3100#ifdef CONFIG_TRACEPOINTS
3101	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
3102					     sizeof(*mod->tracepoints_ptrs),
3103					     &mod->num_tracepoints);
3104#endif
3105#ifdef HAVE_JUMP_LABEL
3106	mod->jump_entries = section_objs(info, "__jump_table",
3107					sizeof(*mod->jump_entries),
3108					&mod->num_jump_entries);
3109#endif
3110#ifdef CONFIG_EVENT_TRACING
3111	mod->trace_events = section_objs(info, "_ftrace_events",
3112					 sizeof(*mod->trace_events),
3113					 &mod->num_trace_events);
3114	mod->trace_evals = section_objs(info, "_ftrace_eval_map",
3115					sizeof(*mod->trace_evals),
3116					&mod->num_trace_evals);
3117#endif
3118#ifdef CONFIG_TRACING
3119	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
3120					 sizeof(*mod->trace_bprintk_fmt_start),
3121					 &mod->num_trace_bprintk_fmt);
3122#endif
3123#ifdef CONFIG_FTRACE_MCOUNT_RECORD
3124	/* sechdrs[0].sh_size is always zero */
3125	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
3126					     sizeof(*mod->ftrace_callsites),
3127					     &mod->num_ftrace_callsites);
3128#endif
3129#ifdef CONFIG_FUNCTION_ERROR_INJECTION
3130	mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
3131					    sizeof(*mod->ei_funcs),
3132					    &mod->num_ei_funcs);
3133#endif
3134	mod->extable = section_objs(info, "__ex_table",
3135				    sizeof(*mod->extable), &mod->num_exentries);
3136
3137	if (section_addr(info, "__obsparm"))
3138		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
3139
3140	info->debug = section_objs(info, "__verbose",
3141				   sizeof(*info->debug), &info->num_debug);
3142
3143	return 0;
3144}
3145
3146static int move_module(struct module *mod, struct load_info *info)
3147{
3148	int i;
3149	void *ptr;
3150
3151	/* Do the allocs. */
3152	ptr = module_alloc(mod->core_layout.size);
3153	/*
3154	 * The pointer to this block is stored in the module structure
3155	 * which is inside the block. Just mark it as not being a
3156	 * leak.
3157	 */
3158	kmemleak_not_leak(ptr);
3159	if (!ptr)
3160		return -ENOMEM;
3161
3162	memset(ptr, 0, mod->core_layout.size);
3163	mod->core_layout.base = ptr;
3164
3165	if (mod->init_layout.size) {
3166		ptr = module_alloc(mod->init_layout.size);
3167		/*
3168		 * The pointer to this block is stored in the module structure
3169		 * which is inside the block. This block doesn't need to be
3170		 * scanned as it contains data and code that will be freed
3171		 * after the module is initialized.
3172		 */
3173		kmemleak_ignore(ptr);
3174		if (!ptr) {
3175			module_memfree(mod->core_layout.base);
3176			return -ENOMEM;
3177		}
3178		memset(ptr, 0, mod->init_layout.size);
3179		mod->init_layout.base = ptr;
3180	} else
3181		mod->init_layout.base = NULL;
3182
3183	/* Transfer each section which specifies SHF_ALLOC */
3184	pr_debug("final section addresses:\n");
3185	for (i = 0; i < info->hdr->e_shnum; i++) {
3186		void *dest;
3187		Elf_Shdr *shdr = &info->sechdrs[i];
3188
3189		if (!(shdr->sh_flags & SHF_ALLOC))
3190			continue;
3191
3192		if (shdr->sh_entsize & INIT_OFFSET_MASK)
3193			dest = mod->init_layout.base
3194				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
3195		else
3196			dest = mod->core_layout.base + shdr->sh_entsize;
3197
3198		if (shdr->sh_type != SHT_NOBITS)
3199			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
3200		/* Update sh_addr to point to copy in image. */
3201		shdr->sh_addr = (unsigned long)dest;
3202		pr_debug("\t0x%lx %s\n",
3203			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
3204	}
3205
3206	return 0;
3207}
3208
3209static int check_module_license_and_versions(struct module *mod)
3210{
3211	int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
3212
3213	/*
3214	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
3215	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
3216	 * using GPL-only symbols it needs.
3217	 */
3218	if (strcmp(mod->name, "ndiswrapper") == 0)
3219		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
3220
3221	/* driverloader was caught wrongly pretending to be under GPL */
3222	if (strcmp(mod->name, "driverloader") == 0)
3223		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3224				 LOCKDEP_NOW_UNRELIABLE);
3225
3226	/* lve claims to be GPL but upstream won't provide source */
3227	if (strcmp(mod->name, "lve") == 0)
3228		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3229				 LOCKDEP_NOW_UNRELIABLE);
3230
3231	if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
3232		pr_warn("%s: module license taints kernel.\n", mod->name);
3233
3234#ifdef CONFIG_MODVERSIONS
3235	if ((mod->num_syms && !mod->crcs)
3236	    || (mod->num_gpl_syms && !mod->gpl_crcs)
3237	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
3238#ifdef CONFIG_UNUSED_SYMBOLS
3239	    || (mod->num_unused_syms && !mod->unused_crcs)
3240	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
3241#endif
3242		) {
3243		return try_to_force_load(mod,
3244					 "no versions for exported symbols");
3245	}
3246#endif
3247	return 0;
3248}
3249
3250static void flush_module_icache(const struct module *mod)
3251{
3252	mm_segment_t old_fs;
3253
3254	/* flush the icache in correct context */
3255	old_fs = get_fs();
3256	set_fs(KERNEL_DS);
3257
3258	/*
3259	 * Flush the instruction cache, since we've played with text.
3260	 * Do it before processing of module parameters, so the module
3261	 * can provide parameter accessor functions of its own.
3262	 */
3263	if (mod->init_layout.base)
3264		flush_icache_range((unsigned long)mod->init_layout.base,
3265				   (unsigned long)mod->init_layout.base
3266				   + mod->init_layout.size);
3267	flush_icache_range((unsigned long)mod->core_layout.base,
3268			   (unsigned long)mod->core_layout.base + mod->core_layout.size);
3269
3270	set_fs(old_fs);
3271}
3272
3273int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
3274				     Elf_Shdr *sechdrs,
3275				     char *secstrings,
3276				     struct module *mod)
3277{
3278	return 0;
3279}
3280
3281/* module_blacklist is a comma-separated list of module names */
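/* e.g. booting with module_blacklist=foo,bar makes load_module() reject those modules with -EPERM. */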
3282static char *module_blacklist;
3283static bool blacklisted(const char *module_name)
3284{
3285	const char *p;
3286	size_t len;
3287
3288	if (!module_blacklist)
3289		return false;
3290
3291	for (p = module_blacklist; *p; p += len) {
3292		len = strcspn(p, ",");
3293		if (strlen(module_name) == len && !memcmp(module_name, p, len))
3294			return true;
3295		if (p[len] == ',')
3296			len++;
3297	}
3298	return false;
3299}
3300core_param(module_blacklist, module_blacklist, charp, 0400);
3301
3302static struct module *layout_and_allocate(struct load_info *info, int flags)
3303{
3304	/* Module within temporary copy. */
3305	struct module *mod;
3306	unsigned int ndx;
3307	int err;
3308
3309	mod = setup_load_info(info, flags);
3310	if (IS_ERR(mod))
3311		return mod;
3312
3313	if (blacklisted(info->name))
3314		return ERR_PTR(-EPERM);
3315
3316	err = check_modinfo(mod, info, flags);
3317	if (err)
3318		return ERR_PTR(err);
3319
3320	/* Allow arches to frob section contents and sizes.  */
3321	err = module_frob_arch_sections(info->hdr, info->sechdrs,
3322					info->secstrings, mod);
3323	if (err < 0)
3324		return ERR_PTR(err);
3325
3326	/* We will do a special allocation for per-cpu sections later. */
3327	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
3328
3329	/*
3330	 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
3331	 * layout_sections() can put it in the right place.
3332	 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
3333	 */
3334	ndx = find_sec(info, ".data..ro_after_init");
3335	if (ndx)
3336		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3337
3338	/* Determine total sizes, and put offsets in sh_entsize.  For now
3339	   this is done generically; there doesn't appear to be any
3340	   special cases for the architectures. */
3341	layout_sections(mod, info);
3342	layout_symtab(mod, info);
3343
3344	/* Allocate and move to the final place */
3345	err = move_module(mod, info);
3346	if (err)
3347		return ERR_PTR(err);
3348
3349	/* Module has been copied to its final place now: return it. */
3350	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
3351	kmemleak_load_module(mod, info);
3352	return mod;
3353}
3354
3355/* mod is no longer valid after this! */
3356static void module_deallocate(struct module *mod, struct load_info *info)
3357{
3358	percpu_modfree(mod);
3359	module_arch_freeing_init(mod);
3360	module_memfree(mod->init_layout.base);
3361	module_memfree(mod->core_layout.base);
3362}
3363
3364int __weak module_finalize(const Elf_Ehdr *hdr,
3365			   const Elf_Shdr *sechdrs,
3366			   struct module *me)
3367{
3368	return 0;
3369}
3370
3371static int post_relocation(struct module *mod, const struct load_info *info)
3372{
3373	/* Sort exception table now relocations are done. */
3374	sort_extable(mod->extable, mod->extable + mod->num_exentries);
3375
3376	/* Copy relocated percpu area over. */
3377	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
3378		       info->sechdrs[info->index.pcpu].sh_size);
3379
3380	/* Setup kallsyms-specific fields. */
3381	add_kallsyms(mod, info);
3382
3383	/* Arch-specific module finalizing. */
3384	return module_finalize(info->hdr, info->sechdrs, mod);
3385}
3386
3387/* Is this module of this name done loading?  No locks held. */
3388static bool finished_loading(const char *name)
3389{
3390	struct module *mod;
3391	bool ret;
3392
3393	/*
3394	 * The module_mutex should not be a heavily contended lock;
3395	 * if we get the occasional sleep here, we'll go an extra iteration
3396	 * in the wait_event_interruptible(), which is harmless.
3397	 */
3398	sched_annotate_sleep();
3399	mutex_lock(&module_mutex);
3400	mod = find_module_all(name, strlen(name), true);
3401	ret = !mod || mod->state == MODULE_STATE_LIVE
3402		|| mod->state == MODULE_STATE_GOING;
3403	mutex_unlock(&module_mutex);
3404
3405	return ret;
3406}
3407
3408/* Call module constructors. */
3409static void do_mod_ctors(struct module *mod)
3410{
3411#ifdef CONFIG_CONSTRUCTORS
3412	unsigned long i;
3413
3414	for (i = 0; i < mod->num_ctors; i++)
3415		mod->ctors[i]();
3416#endif
3417}
3418
3419/* For freeing module_init on success, in case kallsyms is still traversing it */
3420struct mod_initfree {
3421	struct rcu_head rcu;
3422	void *module_init;
3423};
3424
3425static void do_free_init(struct rcu_head *head)
3426{
3427	struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
3428	module_memfree(m->module_init);
3429	kfree(m);
3430}
3431
3432/*
3433 * This is where the real work happens.
3434 *
3435 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
3436 * helper command 'lx-symbols'.
3437 */
3438static noinline int do_init_module(struct module *mod)
3439{
3440	int ret = 0;
3441	struct mod_initfree *freeinit;
3442
3443	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
3444	if (!freeinit) {
3445		ret = -ENOMEM;
3446		goto fail;
3447	}
3448	freeinit->module_init = mod->init_layout.base;
3449
3450	/*
3451	 * We want to find out whether @mod uses async during init.  Clear
3452	 * PF_USED_ASYNC.  async_schedule*() will set it.
3453	 */
3454	current->flags &= ~PF_USED_ASYNC;
3455
3456	do_mod_ctors(mod);
3457	/* Start the module */
3458	if (mod->init != NULL)
3459		ret = do_one_initcall(mod->init);
3460	if (ret < 0) {
3461		goto fail_free_freeinit;
3462	}
3463	if (ret > 0) {
3464		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3465			"follow 0/-E convention\n"
3466			"%s: loading module anyway...\n",
3467			__func__, mod->name, ret, __func__);
3468		dump_stack();
3469	}
3470
3471	/* Now it's a first class citizen! */
3472	mod->state = MODULE_STATE_LIVE;
3473	blocking_notifier_call_chain(&module_notify_list,
3474				     MODULE_STATE_LIVE, mod);
3475
3476	/*
3477	 * We need to finish all async code before the module init sequence
3478	 * is done.  This has potential to deadlock.  For example, a newly
3479	 * detected block device can trigger request_module() of the
3480	 * default iosched from async probing task.  Once userland helper
3481	 * reaches here, async_synchronize_full() will wait on the async
3482	 * task waiting on request_module() and deadlock.
3483	 *
3484	 * This deadlock is avoided by performing async_synchronize_full()
3485	 * iff module init queued any async jobs.  This isn't a full
3486	 * solution as it will deadlock the same if module loading from
3487	 * async jobs nests more than once; however, due to the various
3488	 * constraints, this hack seems to be the best option for now.
3489	 * Please refer to the following thread for details.
3490	 *
3491	 * http://thread.gmane.org/gmane.linux.kernel/1420814
3492	 */
3493	if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
3494		async_synchronize_full();
3495
3496	ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
3497			mod->init_layout.size);
3498	mutex_lock(&module_mutex);
3499	/* Drop initial reference. */
3500	module_put(mod);
3501	trim_init_extable(mod);
3502#ifdef CONFIG_KALLSYMS
3503	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
3504	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
3505#endif
3506	module_enable_ro(mod, true);
3507	mod_tree_remove_init(mod);
3508	disable_ro_nx(&mod->init_layout);
3509	module_arch_freeing_init(mod);
3510	mod->init_layout.base = NULL;
3511	mod->init_layout.size = 0;
3512	mod->init_layout.ro_size = 0;
3513	mod->init_layout.ro_after_init_size = 0;
3514	mod->init_layout.text_size = 0;
3515	/*
3516	 * We want to free module_init, but be aware that kallsyms may be
3517	 * walking this with preempt disabled.  In all the failure paths, we
3518	 * call synchronize_sched(), but we don't want to slow down the success
3519	 * path, so use actual RCU here.
3520	 * Note that module_alloc() on most architectures creates W+X page
3521	 * mappings which won't be cleaned up until do_free_init() runs.  Any
3522	 * code such as mark_rodata_ro() which depends on those mappings to
3523	 * be cleaned up needs to sync with the queued work - ie
3524	 * rcu_barrier_sched()
3525	 */
3526	call_rcu_sched(&freeinit->rcu, do_free_init);
3527	mutex_unlock(&module_mutex);
3528	wake_up_all(&module_wq);
3529
3530	return 0;
3531
3532fail_free_freeinit:
3533	kfree(freeinit);
3534fail:
3535	/* Try to protect us from buggy refcounters. */
3536	mod->state = MODULE_STATE_GOING;
3537	synchronize_sched();
3538	module_put(mod);
3539	blocking_notifier_call_chain(&module_notify_list,
3540				     MODULE_STATE_GOING, mod);
3541	klp_module_going(mod);
3542	ftrace_release_mod(mod);
3543	free_module(mod);
3544	wake_up_all(&module_wq);
3545	return ret;
3546}
3547
3548static int may_init_module(void)
3549{
3550	if (!capable(CAP_SYS_MODULE) || modules_disabled)
3551		return -EPERM;
3552
3553	return 0;
3554}
3555
3556/*
3557 * We try to place it in the list now to make sure it's unique before
3558 * we dedicate too many resources; in particular, this avoids temporary
3559 * percpu memory exhaustion.
3560 */
3561static int add_unformed_module(struct module *mod)
3562{
3563	int err;
3564	struct module *old;
3565
3566	mod->state = MODULE_STATE_UNFORMED;
3567
3568again:
3569	mutex_lock(&module_mutex);
3570	old = find_module_all(mod->name, strlen(mod->name), true);
3571	if (old != NULL) {
3572		if (old->state == MODULE_STATE_COMING
3573		    || old->state == MODULE_STATE_UNFORMED) {
3574			/* Wait in case it fails to load. */
3575			mutex_unlock(&module_mutex);
3576			err = wait_event_interruptible(module_wq,
3577					       finished_loading(mod->name));
3578			if (err)
3579				goto out_unlocked;
3580			goto again;
3581		}
3582		err = -EEXIST;
3583		goto out;
3584	}
3585	mod_update_bounds(mod);
3586	list_add_rcu(&mod->list, &modules);
3587	mod_tree_insert(mod);
3588	err = 0;
3589
3590out:
3591	mutex_unlock(&module_mutex);
3592out_unlocked:
3593	return err;
3594}
3595
3596static int complete_formation(struct module *mod, struct load_info *info)
3597{
3598	int err;
3599
3600	mutex_lock(&module_mutex);
3601
3602	/* Find duplicate symbols (must be called under lock). */
3603	err = verify_export_symbols(mod);
3604	if (err < 0)
3605		goto out;
3606
3607	/* This relies on module_mutex for list integrity. */
3608	module_bug_finalize(info->hdr, info->sechdrs, mod);
3609
3610	module_enable_ro(mod, false);
3611	module_enable_nx(mod);
3612
3613	/* Mark state as coming so strong_try_module_get() ignores us,
3614	 * but kallsyms etc. can see us. */
3615	mod->state = MODULE_STATE_COMING;
3616	mutex_unlock(&module_mutex);
3617
3618	return 0;
3619
3620out:
3621	mutex_unlock(&module_mutex);
3622	return err;
3623}
3624
3625static int prepare_coming_module(struct module *mod)
3626{
3627	int err;
3628
3629	ftrace_module_enable(mod);
3630	err = klp_module_coming(mod);
3631	if (err)
3632		return err;
3633
3634	blocking_notifier_call_chain(&module_notify_list,
3635				     MODULE_STATE_COMING, mod);
3636	return 0;
3637}
3638
3639static int unknown_module_param_cb(char *param, char *val, const char *modname,
3640				   void *arg)
3641{
3642	struct module *mod = arg;
3643	int ret;
3644
3645	if (strcmp(param, "async_probe") == 0) {
3646		mod->async_probe_requested = true;
3647		return 0;
3648	}
3649
3650	/* Check for magic 'dyndbg' arg */
3651	ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3652	if (ret != 0)
3653		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3654	return 0;
3655}
3656
3657/* Allocate and load the module: note that size of section 0 is always
3658   zero, and we rely on this for optional sections. */
3659static int load_module(struct load_info *info, const char __user *uargs,
3660		       int flags)
3661{
3662	struct module *mod;
3663	long err;
3664	char *after_dashes;
3665
3666	err = module_sig_check(info, flags);
3667	if (err)
3668		goto free_copy;
3669
3670	err = elf_header_check(info);
3671	if (err)
3672		goto free_copy;
3673
3674	/* Figure out module layout, and allocate all the memory. */
3675	mod = layout_and_allocate(info, flags);
3676	if (IS_ERR(mod)) {
3677		err = PTR_ERR(mod);
3678		goto free_copy;
3679	}
3680
3681	audit_log_kern_module(mod->name);
3682
3683	/* Reserve our place in the list. */
3684	err = add_unformed_module(mod);
3685	if (err)
3686		goto free_module;
3687
3688#ifdef CONFIG_MODULE_SIG
3689	mod->sig_ok = info->sig_ok;
3690	if (!mod->sig_ok) {
3691		pr_notice_once("%s: module verification failed: signature "
3692			       "and/or required key missing - tainting "
3693			       "kernel\n", mod->name);
3694		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
3695	}
3696#endif
3697
3698	/* To avoid stressing percpu allocator, do this once we're unique. */
3699	err = percpu_modalloc(mod, info);
3700	if (err)
3701		goto unlink_mod;
3702
3703	/* Now module is in final location, initialize linked lists, etc. */
3704	err = module_unload_init(mod);
3705	if (err)
3706		goto unlink_mod;
3707
3708	init_param_lock(mod);
3709
3710	/* Now we've got everything in the final locations, we can
3711	 * find optional sections. */
3712	err = find_module_sections(mod, info);
3713	if (err)
3714		goto free_unload;
3715
3716	err = check_module_license_and_versions(mod);
3717	if (err)
3718		goto free_unload;
3719
3720	/* Set up MODINFO_ATTR fields */
3721	setup_modinfo(mod, info);
3722
3723	/* Fix up syms, so that st_value is a pointer to location. */
3724	err = simplify_symbols(mod, info);
3725	if (err < 0)
3726		goto free_modinfo;
3727
3728	err = apply_relocations(mod, info);
3729	if (err < 0)
3730		goto free_modinfo;
3731
3732	err = post_relocation(mod, info);
3733	if (err < 0)
3734		goto free_modinfo;
3735
3736	flush_module_icache(mod);
3737
3738	/* Now copy in args */
3739	mod->args = strndup_user(uargs, ~0UL >> 1);
3740	if (IS_ERR(mod->args)) {
3741		err = PTR_ERR(mod->args);
3742		goto free_arch_cleanup;
3743	}
3744
3745	dynamic_debug_setup(mod, info->debug, info->num_debug);
3746
3747	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3748	ftrace_module_init(mod);
3749
3750	/* Finally it's fully formed, ready to start executing. */
3751	err = complete_formation(mod, info);
3752	if (err)
3753		goto ddebug_cleanup;
3754
3755	err = prepare_coming_module(mod);
3756	if (err)
3757		goto bug_cleanup;
3758
3759	/* Module is ready to execute: parsing args may do that. */
3760	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3761				  -32768, 32767, mod,
3762				  unknown_module_param_cb);
3763	if (IS_ERR(after_dashes)) {
3764		err = PTR_ERR(after_dashes);
3765		goto coming_cleanup;
3766	} else if (after_dashes) {
3767		pr_warn("%s: parameters '%s' after `--' ignored\n",
3768		       mod->name, after_dashes);
3769	}
3770
3771	/* Link in to sysfs. */
3772	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3773	if (err < 0)
3774		goto coming_cleanup;
3775
3776	if (is_livepatch_module(mod)) {
3777		err = copy_module_elf(mod, info);
3778		if (err < 0)
3779			goto sysfs_cleanup;
3780	}
3781
3782	/* Get rid of temporary copy. */
3783	free_copy(info);
3784
3785	/* Done! */
3786	trace_module_load(mod);
3787
3788	return do_init_module(mod);
3789
3790 sysfs_cleanup:
3791	mod_sysfs_teardown(mod);
3792 coming_cleanup:
3793	mod->state = MODULE_STATE_GOING;
3794	destroy_params(mod->kp, mod->num_kp);
3795	blocking_notifier_call_chain(&module_notify_list,
3796				     MODULE_STATE_GOING, mod);
3797	klp_module_going(mod);
3798 bug_cleanup:
3799	/* module_bug_cleanup needs module_mutex protection */
3800	mutex_lock(&module_mutex);
3801	module_bug_cleanup(mod);
3802	mutex_unlock(&module_mutex);
3803
3804	/* we can't deallocate the module until we clear memory protection */
3805	module_disable_ro(mod);
3806	module_disable_nx(mod);
3807
3808 ddebug_cleanup:
3809	ftrace_release_mod(mod);
3810	dynamic_debug_remove(mod, info->debug);
3811	synchronize_sched();
3812	kfree(mod->args);
3813 free_arch_cleanup:
3814	module_arch_cleanup(mod);
3815 free_modinfo:
3816	free_modinfo(mod);
3817 free_unload:
3818	module_unload_free(mod);
3819 unlink_mod:
3820	mutex_lock(&module_mutex);
3821	/* Unlink carefully: kallsyms could be walking list. */
3822	list_del_rcu(&mod->list);
3823	mod_tree_remove(mod);
3824	wake_up_all(&module_wq);
3825	/* Wait for RCU-sched synchronizing before releasing mod->list. */
3826	synchronize_sched();
3827	mutex_unlock(&module_mutex);
3828 free_module:
3829	/* Free lock-classes; relies on the preceding sync_rcu() */
3830	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
3831
3832	module_deallocate(mod, info);
3833 free_copy:
3834	free_copy(info);
3835	return err;
3836}
3837
3838SYSCALL_DEFINE3(init_module, void __user *, umod,
3839		unsigned long, len, const char __user *, uargs)
3840{
3841	int err;
3842	struct load_info info = { };
3843
3844	err = may_init_module();
3845	if (err)
3846		return err;
3847
3848	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3849	       umod, len, uargs);
3850
3851	err = copy_module_from_user(umod, len, &info);
3852	if (err)
3853		return err;
3854
3855	return load_module(&info, uargs, 0);
3856}
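/*
 * Sketch of a userspace caller (illustrative only; "image"/"len" would come
 * from reading a .ko file into memory, and "debug=1" is a made-up parameter):
 *
 *	if (syscall(SYS_init_module, image, len, "debug=1") != 0)
 *		perror("init_module");
 *
 * The third argument is the parameter string copied and parsed above by
 * load_module()/parse_args().
 */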
3857
3858SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3859{
3860	struct load_info info = { };
3861	loff_t size;
3862	void *hdr;
3863	int err;
3864
3865	err = may_init_module();
3866	if (err)
3867		return err;
3868
3869	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3870
3871	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3872		      |MODULE_INIT_IGNORE_VERMAGIC))
3873		return -EINVAL;
3874
3875	err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
3876				       READING_MODULE);
3877	if (err)
3878		return err;
3879	info.hdr = hdr;
3880	info.len = size;
3881
3882	return load_module(&info, uargs, flags);
3883}
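/*
 * Sketch of a userspace caller (illustrative only; the file name is made up):
 *
 *	int fd = open("mydrv.ko", O_RDONLY | O_CLOEXEC);
 *	syscall(SYS_finit_module, fd, "", MODULE_INIT_IGNORE_VERMAGIC);
 *
 * Any flag outside the two MODULE_INIT_* bits checked above is rejected
 * with -EINVAL.
 */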
3884
3885static inline int within(unsigned long addr, void *start, unsigned long size)
3886{
3887	return ((void *)addr >= start && (void *)addr < start + size);
3888}
3889
3890#ifdef CONFIG_KALLSYMS
3891/*
3892 * This ignores the intensely annoying "mapping symbols" found
3893 * in ARM ELF files: $a, $t and $d.
3894 */
3895static inline int is_arm_mapping_symbol(const char *str)
3896{
3897	if (str[0] == '.' && str[1] == 'L')
3898		return true;
3899	return str[0] == '$' && strchr("axtd", str[1])
3900	       && (str[2] == '\0' || str[2] == '.');
3901}
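/*
 * Examples of names skipped by the test above: ARM mapping symbols such as
 * "$a", "$t.2" or "$d", and compiler-generated local labels such as ".L123"
 * (caught by the '.''L' prefix check).
 */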
3902
3903static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
3904{
3905	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
3906}
3907
3908static const char *get_ksymbol(struct module *mod,
3909			       unsigned long addr,
3910			       unsigned long *size,
3911			       unsigned long *offset)
3912{
3913	unsigned int i, best = 0;
3914	unsigned long nextval;
3915	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
3916
3917	/* At worst, next value is at end of module */
3918	if (within_module_init(addr, mod))
3919		nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
3920	else
3921		nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
3922
3923	/* Scan for closest preceding symbol, and next symbol. (ELF
3924	   starts real symbols at 1). */
3925	for (i = 1; i < kallsyms->num_symtab; i++) {
3926		if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
3927			continue;
3928
3929		/* We ignore unnamed symbols: they're uninformative
3930		 * and inserted at a whim. */
3931		if (*symname(kallsyms, i) == '\0'
3932		    || is_arm_mapping_symbol(symname(kallsyms, i)))
3933			continue;
3934
3935		if (kallsyms->symtab[i].st_value <= addr
3936		    && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
3937			best = i;
3938		if (kallsyms->symtab[i].st_value > addr
3939		    && kallsyms->symtab[i].st_value < nextval)
3940			nextval = kallsyms->symtab[i].st_value;
3941	}
3942
3943	if (!best)
3944		return NULL;
3945
3946	if (size)
3947		*size = nextval - kallsyms->symtab[best].st_value;
3948	if (offset)
3949		*offset = addr - kallsyms->symtab[best].st_value;
3950	return symname(kallsyms, best);
3951}
3952
3953void * __weak dereference_module_function_descriptor(struct module *mod,
3954						     void *ptr)
3955{
3956	return ptr;
3957}
3958
3959/* For kallsyms to ask for address resolution.  NULL means not found.  Careful
3960 * not to lock to avoid deadlock on oopses, simply disable preemption. */
3961const char *module_address_lookup(unsigned long addr,
3962			    unsigned long *size,
3963			    unsigned long *offset,
3964			    char **modname,
3965			    char *namebuf)
3966{
3967	const char *ret = NULL;
3968	struct module *mod;
3969
3970	preempt_disable();
3971	mod = __module_address(addr);
3972	if (mod) {
3973		if (modname)
3974			*modname = mod->name;
3975		ret = get_ksymbol(mod, addr, size, offset);
3976	}
3977	/* Make a copy in here where it's safe */
3978	if (ret) {
3979		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3980		ret = namebuf;
3981	}
3982	preempt_enable();
3983
3984	return ret;
3985}
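/*
 * Illustrative result: for an address inside module text, the caller (e.g.
 * kallsyms_lookup()) ends up with the name/size/offset triple that backtraces
 * print as something like "my_func+0x1c/0xd0 [mydrv]", where both names here
 * are hypothetical.
 */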
3986
3987int lookup_module_symbol_name(unsigned long addr, char *symname)
3988{
3989	struct module *mod;
3990
3991	preempt_disable();
3992	list_for_each_entry_rcu(mod, &modules, list) {
3993		if (mod->state == MODULE_STATE_UNFORMED)
3994			continue;
3995		if (within_module(addr, mod)) {
3996			const char *sym;
3997
3998			sym = get_ksymbol(mod, addr, NULL, NULL);
3999			if (!sym)
4000				goto out;
4001			strlcpy(symname, sym, KSYM_NAME_LEN);
4002			preempt_enable();
4003			return 0;
4004		}
4005	}
4006out:
4007	preempt_enable();
4008	return -ERANGE;
4009}
4010
4011int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
4012			unsigned long *offset, char *modname, char *name)
4013{
4014	struct module *mod;
4015
4016	preempt_disable();
4017	list_for_each_entry_rcu(mod, &modules, list) {
4018		if (mod->state == MODULE_STATE_UNFORMED)
4019			continue;
4020		if (within_module(addr, mod)) {
4021			const char *sym;
4022
4023			sym = get_ksymbol(mod, addr, size, offset);
4024			if (!sym)
4025				goto out;
4026			if (modname)
4027				strlcpy(modname, mod->name, MODULE_NAME_LEN);
4028			if (name)
4029				strlcpy(name, sym, KSYM_NAME_LEN);
4030			preempt_enable();
4031			return 0;
4032		}
4033	}
4034out:
4035	preempt_enable();
4036	return -ERANGE;
4037}
4038
4039int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
4040			char *name, char *module_name, int *exported)
4041{
4042	struct module *mod;
4043
4044	preempt_disable();
4045	list_for_each_entry_rcu(mod, &modules, list) {
4046		struct mod_kallsyms *kallsyms;
4047
4048		if (mod->state == MODULE_STATE_UNFORMED)
4049			continue;
4050		kallsyms = rcu_dereference_sched(mod->kallsyms);
4051		if (symnum < kallsyms->num_symtab) {
4052			*value = kallsyms->symtab[symnum].st_value;
4053			*type = kallsyms->symtab[symnum].st_info;
4054			strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
4055			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
4056			*exported = is_exported(name, *value, mod);
4057			preempt_enable();
4058			return 0;
4059		}
4060		symnum -= kallsyms->num_symtab;
4061	}
4062	preempt_enable();
4063	return -ERANGE;
4064}
4065
4066static unsigned long mod_find_symname(struct module *mod, const char *name)
4067{
4068	unsigned int i;
4069	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
4070
4071	for (i = 0; i < kallsyms->num_symtab; i++)
4072		if (strcmp(name, symname(kallsyms, i)) == 0 &&
4073		    kallsyms->symtab[i].st_info != 'U')
4074			return kallsyms->symtab[i].st_value;
4075	return 0;
4076}
4077
4078/* Look for this name: can be of form module:name. */
4079unsigned long module_kallsyms_lookup_name(const char *name)
4080{
4081	struct module *mod;
4082	char *colon;
4083	unsigned long ret = 0;
4084
4085	/* Don't lock: we're in enough trouble already. */
4086	preempt_disable();
4087	if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
4088		if ((mod = find_module_all(name, colon - name, false)) != NULL)
4089			ret = mod_find_symname(mod, colon+1);
4090	} else {
4091		list_for_each_entry_rcu(mod, &modules, list) {
4092			if (mod->state == MODULE_STATE_UNFORMED)
4093				continue;
4094			if ((ret = mod_find_symname(mod, name)) != 0)
4095				break;
4096		}
4097	}
4098	preempt_enable();
4099	return ret;
4100}
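/*
 * Illustrative lookups (symbol and module names are hypothetical):
 * module_kallsyms_lookup_name("mydrv:my_func") searches only "mydrv", while
 * module_kallsyms_lookup_name("my_func") scans every loaded module and
 * returns the first match, or 0 if none is found.
 */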
4101
4102int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
4103					     struct module *, unsigned long),
4104				   void *data)
4105{
4106	struct module *mod;
4107	unsigned int i;
4108	int ret;
4109
4110	module_assert_mutex();
4111
4112	list_for_each_entry(mod, &modules, list) {
4113		/* We hold module_mutex: no need for rcu_dereference_sched */
4114		struct mod_kallsyms *kallsyms = mod->kallsyms;
4115
4116		if (mod->state == MODULE_STATE_UNFORMED)
4117			continue;
4118		for (i = 0; i < kallsyms->num_symtab; i++) {
4119			ret = fn(data, symname(kallsyms, i),
4120				 mod, kallsyms->symtab[i].st_value);
4121			if (ret != 0)
4122				return ret;
4123		}
4124	}
4125	return 0;
4126}
4127#endif /* CONFIG_KALLSYMS */
4128
4129/* Maximum number of characters written by module_flags() */
4130#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
4131
4132/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
4133static char *module_flags(struct module *mod, char *buf)
4134{
4135	int bx = 0;
4136
4137	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
4138	if (mod->taints ||
4139	    mod->state == MODULE_STATE_GOING ||
4140	    mod->state == MODULE_STATE_COMING) {
4141		buf[bx++] = '(';
4142		bx += module_flags_taint(mod, buf + bx);
4143		/* Show a - for module-is-being-unloaded */
4144		if (mod->state == MODULE_STATE_GOING)
4145			buf[bx++] = '-';
4146		/* Show a + for module-is-being-loaded */
4147		if (mod->state == MODULE_STATE_COMING)
4148			buf[bx++] = '+';
4149		buf[bx++] = ')';
4150	}
4151	buf[bx] = '\0';
4152
4153	return buf;
4154}
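/*
 * Example output (illustrative): a live, untainted module yields an empty
 * string; an out-of-tree, unsigned module that is still loading might yield
 * "(OE+)": taint characters from module_flags_taint() followed by '+' for
 * MODULE_STATE_COMING.
 */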
4155
4156#ifdef CONFIG_PROC_FS
4157/* Called by the /proc file system to return a list of modules. */
4158static void *m_start(struct seq_file *m, loff_t *pos)
4159{
4160	mutex_lock(&module_mutex);
4161	return seq_list_start(&modules, *pos);
4162}
4163
4164static void *m_next(struct seq_file *m, void *p, loff_t *pos)
4165{
4166	return seq_list_next(p, &modules, pos);
4167}
4168
4169static void m_stop(struct seq_file *m, void *p)
4170{
4171	mutex_unlock(&module_mutex);
4172}
4173
4174static int m_show(struct seq_file *m, void *p)
4175{
4176	struct module *mod = list_entry(p, struct module, list);
4177	char buf[MODULE_FLAGS_BUF_SIZE];
4178	void *value;
4179
4180	/* We always ignore unformed modules. */
4181	if (mod->state == MODULE_STATE_UNFORMED)
4182		return 0;
4183
4184	seq_printf(m, "%s %u",
4185		   mod->name, mod->init_layout.size + mod->core_layout.size);
4186	print_unload_info(m, mod);
4187
4188	/* Informative for users. */
4189	seq_printf(m, " %s",
4190		   mod->state == MODULE_STATE_GOING ? "Unloading" :
4191		   mod->state == MODULE_STATE_COMING ? "Loading" :
4192		   "Live");
4193	/* Used by oprofile and other similar tools. */
4194	value = m->private ? NULL : mod->core_layout.base;
4195	seq_printf(m, " 0x%px", value);
4196
4197	/* Taints info */
4198	if (mod->taints)
4199		seq_printf(m, " %s", module_flags(mod, buf));
4200
4201	seq_puts(m, "\n");
4202	return 0;
4203}
4204
4205/* Format: modulename size refcount deps address
4206
4207   Where refcount is a number or -, and deps is a comma-separated list
4208   of depends or -.
4209*/
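/* Illustrative line (hypothetical module):

	mydrv 16384 2 dep_a,dep_b, Live 0x0000000000000000

   The address is printed as zero when kernel pointer values are hidden,
   see modules_open() below.
*/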
4210static const struct seq_operations modules_op = {
4211	.start	= m_start,
4212	.next	= m_next,
4213	.stop	= m_stop,
4214	.show	= m_show
4215};
4216
4217/*
4218 * This also sets the "private" pointer to non-NULL if the
4219 * kernel pointers should be hidden (so you can just test
4220 * "m->private" to see if you should keep the values private).
4221 *
4222 * We use the same logic as for /proc/kallsyms.
4223 */
4224static int modules_open(struct inode *inode, struct file *file)
4225{
4226	int err = seq_open(file, &modules_op);
4227
4228	if (!err) {
4229		struct seq_file *m = file->private_data;
4230		m->private = kallsyms_show_value() ? NULL : (void *)8ul;
4231	}
4232
4233	return err;
4234}
4235
4236static const struct file_operations proc_modules_operations = {
4237	.open		= modules_open,
4238	.read		= seq_read,
4239	.llseek		= seq_lseek,
4240	.release	= seq_release,
4241};
4242
4243static int __init proc_modules_init(void)
4244{
4245	proc_create("modules", 0, NULL, &proc_modules_operations);
4246	return 0;
4247}
4248module_init(proc_modules_init);
4249#endif
4250
4251/* Given an address, look for it in the module exception tables. */
4252const struct exception_table_entry *search_module_extables(unsigned long addr)
4253{
4254	const struct exception_table_entry *e = NULL;
4255	struct module *mod;
4256
4257	preempt_disable();
4258	mod = __module_address(addr);
4259	if (!mod)
4260		goto out;
4261
4262	if (!mod->num_exentries)
4263		goto out;
4264
4265	e = search_extable(mod->extable,
4266			   mod->num_exentries,
4267			   addr);
4268out:
4269	preempt_enable();
4270
4271	/*
4272	 * Now, if we found one, we are running inside it now, hence
4273	 * we cannot unload the module, hence no refcnt needed.
4274	 */
4275	return e;
4276}
4277
4278/*
4279 * is_module_address - is this address inside a module?
4280 * @addr: the address to check.
4281 *
4282 * See is_module_text_address() if you simply want to see if the address
4283 * is code (not data).
4284 */
4285bool is_module_address(unsigned long addr)
4286{
4287	bool ret;
4288
4289	preempt_disable();
4290	ret = __module_address(addr) != NULL;
4291	preempt_enable();
4292
4293	return ret;
4294}
4295
4296/*
4297 * __module_address - get the module which contains an address.
4298 * @addr: the address.
4299 *
4300 * Must be called with preempt disabled or module mutex held so that
4301 * module doesn't get freed during this.
4302 */
4303struct module *__module_address(unsigned long addr)
4304{
4305	struct module *mod;
4306
4307	if (addr < module_addr_min || addr > module_addr_max)
4308		return NULL;
4309
4310	module_assert_mutex_or_preempt();
4311
4312	mod = mod_find(addr);
4313	if (mod) {
4314		BUG_ON(!within_module(addr, mod));
4315		if (mod->state == MODULE_STATE_UNFORMED)
4316			mod = NULL;
4317	}
4318	return mod;
4319}
4320EXPORT_SYMBOL_GPL(__module_address);
4321
4322/*
4323 * is_module_text_address - is this address inside module code?
4324 * @addr: the address to check.
4325 *
4326 * See is_module_address() if you simply want to see if the address is
4327 * anywhere in a module.  See kernel_text_address() for testing if an
4328 * address corresponds to kernel or module code.
4329 */
4330bool is_module_text_address(unsigned long addr)
4331{
4332	bool ret;
4333
4334	preempt_disable();
4335	ret = __module_text_address(addr) != NULL;
4336	preempt_enable();
4337
4338	return ret;
4339}
4340
4341/*
4342 * __module_text_address - get the module whose code contains an address.
4343 * @addr: the address.
4344 *
4345 * Must be called with preempt disabled or module mutex held so that
4346 * module doesn't get freed during this.
4347 */
4348struct module *__module_text_address(unsigned long addr)
4349{
4350	struct module *mod = __module_address(addr);
4351	if (mod) {
4352		/* Make sure it's within the text section. */
4353		if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
4354		    && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
4355			mod = NULL;
4356	}
4357	return mod;
4358}
4359EXPORT_SYMBOL_GPL(__module_text_address);
4360
4361/* Don't grab lock, we're oopsing. */
4362void print_modules(void)
4363{
4364	struct module *mod;
4365	char buf[MODULE_FLAGS_BUF_SIZE];
4366
4367	printk(KERN_DEFAULT "Modules linked in:");
4368	/* Most callers should already have preempt disabled, but make sure */
4369	preempt_disable();
4370	list_for_each_entry_rcu(mod, &modules, list) {
4371		if (mod->state == MODULE_STATE_UNFORMED)
4372			continue;
4373		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
4374	}
4375	preempt_enable();
4376	if (last_unloaded_module[0])
4377		pr_cont(" [last unloaded: %s]", last_unloaded_module);
4378	pr_cont("\n");
4379}
4380
4381#ifdef CONFIG_MODVERSIONS
4382/* Generate the signature for all relevant module structures here.
4383 * If these change, we don't want to try to parse the module. */
4384void module_layout(struct module *mod,
4385		   struct modversion_info *ver,
4386		   struct kernel_param *kp,
4387		   struct kernel_symbol *ks,
4388		   struct tracepoint * const *tp)
4389{
4390}
4391EXPORT_SYMBOL(module_layout);
4392#endif
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3   Copyright (C) 2002 Richard Henderson
   4   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.
   5
   6*/
   7
   8#define INCLUDE_VERMAGIC
   9
  10#include <linux/export.h>
  11#include <linux/extable.h>
  12#include <linux/moduleloader.h>
  13#include <linux/module_signature.h>
  14#include <linux/trace_events.h>
  15#include <linux/init.h>
  16#include <linux/kallsyms.h>
  17#include <linux/file.h>
  18#include <linux/fs.h>
  19#include <linux/sysfs.h>
  20#include <linux/kernel.h>
  21#include <linux/slab.h>
  22#include <linux/vmalloc.h>
  23#include <linux/elf.h>
  24#include <linux/proc_fs.h>
  25#include <linux/security.h>
  26#include <linux/seq_file.h>
  27#include <linux/syscalls.h>
  28#include <linux/fcntl.h>
  29#include <linux/rcupdate.h>
  30#include <linux/capability.h>
  31#include <linux/cpu.h>
  32#include <linux/moduleparam.h>
  33#include <linux/errno.h>
  34#include <linux/err.h>
  35#include <linux/vermagic.h>
  36#include <linux/notifier.h>
  37#include <linux/sched.h>
  38#include <linux/device.h>
  39#include <linux/string.h>
  40#include <linux/mutex.h>
  41#include <linux/rculist.h>
  42#include <linux/uaccess.h>
  43#include <asm/cacheflush.h>
  44#include <linux/set_memory.h>
  45#include <asm/mmu_context.h>
  46#include <linux/license.h>
  47#include <asm/sections.h>
  48#include <linux/tracepoint.h>
  49#include <linux/ftrace.h>
  50#include <linux/livepatch.h>
  51#include <linux/async.h>
  52#include <linux/percpu.h>
  53#include <linux/kmemleak.h>
  54#include <linux/jump_label.h>
  55#include <linux/pfn.h>
  56#include <linux/bsearch.h>
  57#include <linux/dynamic_debug.h>
  58#include <linux/audit.h>
  59#include <uapi/linux/module.h>
  60#include "module-internal.h"
  61
  62#define CREATE_TRACE_POINTS
  63#include <trace/events/module.h>
  64
  65#ifndef ARCH_SHF_SMALL
  66#define ARCH_SHF_SMALL 0
  67#endif
  68
  69/*
  70 * Modules' sections will be aligned on page boundaries
  71 * to ensure complete separation of code and data, but
  72 * only when CONFIG_ARCH_HAS_STRICT_MODULE_RWX=y
  73 */
  74#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
  75# define debug_align(X) ALIGN(X, PAGE_SIZE)
  76#else
  77# define debug_align(X) (X)
  78#endif
  79
  80/* If this is set, the section belongs in the init part of the module */
  81#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
  82
  83/*
  84 * Mutex protects:
  85 * 1) List of modules (also safely readable with preempt_disable),
  86 * 2) module_use links,
  87 * 3) module_addr_min/module_addr_max.
  88 * (delete and add uses RCU list operations). */
  89DEFINE_MUTEX(module_mutex);
  90EXPORT_SYMBOL_GPL(module_mutex);
  91static LIST_HEAD(modules);
  92
  93/* Work queue for freeing init sections in success case */
  94static struct work_struct init_free_wq;
  95static struct llist_head init_free_list;
  96
  97#ifdef CONFIG_MODULES_TREE_LOOKUP
  98
  99/*
 100 * Use a latched RB-tree for __module_address(); this allows us to use
 101 * RCU-sched lookups of the address from any context.
 102 *
 103 * This is conditional on PERF_EVENTS || TRACING because those can really hit
 104 * __module_address() hard by doing a lot of stack unwinding; potentially from
 105 * NMI context.
 106 */
 107
 108static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
 109{
 110	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
 111
 112	return (unsigned long)layout->base;
 113}
 114
 115static __always_inline unsigned long __mod_tree_size(struct latch_tree_node *n)
 116{
 117	struct module_layout *layout = container_of(n, struct module_layout, mtn.node);
 118
 119	return (unsigned long)layout->size;
 120}
 121
 122static __always_inline bool
 123mod_tree_less(struct latch_tree_node *a, struct latch_tree_node *b)
 124{
 125	return __mod_tree_val(a) < __mod_tree_val(b);
 126}
 127
 128static __always_inline int
 129mod_tree_comp(void *key, struct latch_tree_node *n)
 130{
 131	unsigned long val = (unsigned long)key;
 132	unsigned long start, end;
 133
 134	start = __mod_tree_val(n);
 135	if (val < start)
 136		return -1;
 137
 138	end = start + __mod_tree_size(n);
 139	if (val >= end)
 140		return 1;
 141
 142	return 0;
 143}
 144
 145static const struct latch_tree_ops mod_tree_ops = {
 146	.less = mod_tree_less,
 147	.comp = mod_tree_comp,
 148};
 149
 150static struct mod_tree_root {
 151	struct latch_tree_root root;
 152	unsigned long addr_min;
 153	unsigned long addr_max;
 154} mod_tree __cacheline_aligned = {
 155	.addr_min = -1UL,
 156};
 157
 158#define module_addr_min mod_tree.addr_min
 159#define module_addr_max mod_tree.addr_max
 160
 161static noinline void __mod_tree_insert(struct mod_tree_node *node)
 162{
 163	latch_tree_insert(&node->node, &mod_tree.root, &mod_tree_ops);
 164}
 165
 166static void __mod_tree_remove(struct mod_tree_node *node)
 167{
 168	latch_tree_erase(&node->node, &mod_tree.root, &mod_tree_ops);
 169}
 170
 171/*
 172 * These modifications: insert, remove_init and remove; are serialized by the
 173 * module_mutex.
 174 */
 175static void mod_tree_insert(struct module *mod)
 176{
 177	mod->core_layout.mtn.mod = mod;
 178	mod->init_layout.mtn.mod = mod;
 179
 180	__mod_tree_insert(&mod->core_layout.mtn);
 181	if (mod->init_layout.size)
 182		__mod_tree_insert(&mod->init_layout.mtn);
 183}
 184
 185static void mod_tree_remove_init(struct module *mod)
 186{
 187	if (mod->init_layout.size)
 188		__mod_tree_remove(&mod->init_layout.mtn);
 189}
 190
 191static void mod_tree_remove(struct module *mod)
 192{
 193	__mod_tree_remove(&mod->core_layout.mtn);
 194	mod_tree_remove_init(mod);
 195}
 196
 197static struct module *mod_find(unsigned long addr)
 198{
 199	struct latch_tree_node *ltn;
 200
 201	ltn = latch_tree_find((void *)addr, &mod_tree.root, &mod_tree_ops);
 202	if (!ltn)
 203		return NULL;
 204
 205	return container_of(ltn, struct mod_tree_node, node)->mod;
 206}
 207
 208#else /* MODULES_TREE_LOOKUP */
 209
 210static unsigned long module_addr_min = -1UL, module_addr_max = 0;
 211
 212static void mod_tree_insert(struct module *mod) { }
 213static void mod_tree_remove_init(struct module *mod) { }
 214static void mod_tree_remove(struct module *mod) { }
 215
 216static struct module *mod_find(unsigned long addr)
 217{
 218	struct module *mod;
 219
 220	list_for_each_entry_rcu(mod, &modules, list,
 221				lockdep_is_held(&module_mutex)) {
 222		if (within_module(addr, mod))
 223			return mod;
 224	}
 225
 226	return NULL;
 227}
 228
 229#endif /* MODULES_TREE_LOOKUP */
 230
 231/*
 232 * Bounds of module text, for speeding up __module_address.
 233 * Protected by module_mutex.
 234 */
 235static void __mod_update_bounds(void *base, unsigned int size)
 236{
 237	unsigned long min = (unsigned long)base;
 238	unsigned long max = min + size;
 239
 240	if (min < module_addr_min)
 241		module_addr_min = min;
 242	if (max > module_addr_max)
 243		module_addr_max = max;
 244}
 245
 246static void mod_update_bounds(struct module *mod)
 247{
 248	__mod_update_bounds(mod->core_layout.base, mod->core_layout.size);
 249	if (mod->init_layout.size)
 250		__mod_update_bounds(mod->init_layout.base, mod->init_layout.size);
 251}
 252
 253#ifdef CONFIG_KGDB_KDB
 254struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
 255#endif /* CONFIG_KGDB_KDB */
 256
 257static void module_assert_mutex(void)
 258{
 259	lockdep_assert_held(&module_mutex);
 260}
 261
 262static void module_assert_mutex_or_preempt(void)
 263{
 264#ifdef CONFIG_LOCKDEP
 265	if (unlikely(!debug_locks))
 266		return;
 267
 268	WARN_ON_ONCE(!rcu_read_lock_sched_held() &&
 269		!lockdep_is_held(&module_mutex));
 270#endif
 271}
 272
 273static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
 274module_param(sig_enforce, bool_enable_only, 0644);
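/*
 * Since this file is built in, the parameter above appears on the kernel
 * command line as "module.sig_enforce"; e.g. booting with
 * "module.sig_enforce=1" makes signature verification mandatory.
 * bool_enable_only means it can be switched on at runtime but never back off.
 */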
 275
 276/*
  277	 * Export the sig_enforce kernel cmdline parameter so that other subsystems
  278	 * can rely on it instead of checking CONFIG_MODULE_SIG_FORCE directly.
 279 */
 280bool is_module_sig_enforced(void)
 281{
 282	return sig_enforce;
 283}
 284EXPORT_SYMBOL(is_module_sig_enforced);
 285
 286void set_module_sig_enforced(void)
 287{
 288	sig_enforce = true;
 289}
 290
 291/* Block module loading/unloading? */
 292int modules_disabled = 0;
 293core_param(nomodule, modules_disabled, bint, 0);
 294
 295/* Waiting for a module to finish initializing? */
 296static DECLARE_WAIT_QUEUE_HEAD(module_wq);
 297
 298static BLOCKING_NOTIFIER_HEAD(module_notify_list);
 299
 300int register_module_notifier(struct notifier_block *nb)
 301{
 302	return blocking_notifier_chain_register(&module_notify_list, nb);
 303}
 304EXPORT_SYMBOL(register_module_notifier);
 305
 306int unregister_module_notifier(struct notifier_block *nb)
 307{
 308	return blocking_notifier_chain_unregister(&module_notify_list, nb);
 309}
 310EXPORT_SYMBOL(unregister_module_notifier);
 311
 312/*
 313 * We require a truly strong try_module_get(): 0 means success.
 314 * Otherwise an error is returned due to ongoing or failed
 315 * initialization etc.
 316 */
 317static inline int strong_try_module_get(struct module *mod)
 318{
 319	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
 320	if (mod && mod->state == MODULE_STATE_COMING)
 321		return -EBUSY;
 322	if (try_module_get(mod))
 323		return 0;
 324	else
 325		return -ENOENT;
 326}
 327
 328static inline void add_taint_module(struct module *mod, unsigned flag,
 329				    enum lockdep_ok lockdep_ok)
 330{
 331	add_taint(flag, lockdep_ok);
 332	set_bit(flag, &mod->taints);
 333}
 334
 335/*
 336 * A thread that wants to hold a reference to a module only while it
 337 * is running can call this to safely exit.  nfsd and lockd use this.
 338 */
 339void __noreturn __module_put_and_exit(struct module *mod, long code)
 340{
 341	module_put(mod);
 342	do_exit(code);
 343}
 344EXPORT_SYMBOL(__module_put_and_exit);
 345
 346/* Find a module section: 0 means not found. */
 347static unsigned int find_sec(const struct load_info *info, const char *name)
 348{
 349	unsigned int i;
 350
 351	for (i = 1; i < info->hdr->e_shnum; i++) {
 352		Elf_Shdr *shdr = &info->sechdrs[i];
 353		/* Alloc bit cleared means "ignore it." */
 354		if ((shdr->sh_flags & SHF_ALLOC)
 355		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
 356			return i;
 357	}
 358	return 0;
 359}
 360
 361/* Find a module section, or NULL. */
 362static void *section_addr(const struct load_info *info, const char *name)
 363{
 364	/* Section 0 has sh_addr 0. */
 365	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
 366}
 367
 368/* Find a module section, or NULL.  Fill in number of "objects" in section. */
 369static void *section_objs(const struct load_info *info,
 370			  const char *name,
 371			  size_t object_size,
 372			  unsigned int *num)
 373{
 374	unsigned int sec = find_sec(info, name);
 375
 376	/* Section 0 has sh_addr 0 and sh_size 0. */
 377	*num = info->sechdrs[sec].sh_size / object_size;
 378	return (void *)info->sechdrs[sec].sh_addr;
 379}
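/*
 * Typical use (see find_module_sections()), e.g.:
 *
 *	mod->kp = section_objs(info, "__param", sizeof(*mod->kp), &mod->num_kp);
 *
 * which returns the start of the "__param" section and stores how many
 * kernel_param objects fit in it.
 */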
 380
 381/* Provided by the linker */
 382extern const struct kernel_symbol __start___ksymtab[];
 383extern const struct kernel_symbol __stop___ksymtab[];
 384extern const struct kernel_symbol __start___ksymtab_gpl[];
 385extern const struct kernel_symbol __stop___ksymtab_gpl[];
 386extern const struct kernel_symbol __start___ksymtab_gpl_future[];
 387extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
 388extern const s32 __start___kcrctab[];
 389extern const s32 __start___kcrctab_gpl[];
 390extern const s32 __start___kcrctab_gpl_future[];
 391#ifdef CONFIG_UNUSED_SYMBOLS
 392extern const struct kernel_symbol __start___ksymtab_unused[];
 393extern const struct kernel_symbol __stop___ksymtab_unused[];
 394extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
 395extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
 396extern const s32 __start___kcrctab_unused[];
 397extern const s32 __start___kcrctab_unused_gpl[];
 398#endif
 399
 400#ifndef CONFIG_MODVERSIONS
 401#define symversion(base, idx) NULL
 402#else
 403#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
 404#endif
 405
 406static bool each_symbol_in_section(const struct symsearch *arr,
 407				   unsigned int arrsize,
 408				   struct module *owner,
 409				   bool (*fn)(const struct symsearch *syms,
 410					      struct module *owner,
 411					      void *data),
 412				   void *data)
 413{
 414	unsigned int j;
 415
 416	for (j = 0; j < arrsize; j++) {
 417		if (fn(&arr[j], owner, data))
 418			return true;
 419	}
 420
 421	return false;
 422}
 423
 424/* Returns true as soon as fn returns true, otherwise false. */
 425static bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
 426				    struct module *owner,
 427				    void *data),
 428			 void *data)
 429{
 430	struct module *mod;
 431	static const struct symsearch arr[] = {
 432		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
 433		  NOT_GPL_ONLY, false },
 434		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
 435		  __start___kcrctab_gpl,
 436		  GPL_ONLY, false },
 437		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
 438		  __start___kcrctab_gpl_future,
 439		  WILL_BE_GPL_ONLY, false },
 440#ifdef CONFIG_UNUSED_SYMBOLS
 441		{ __start___ksymtab_unused, __stop___ksymtab_unused,
 442		  __start___kcrctab_unused,
 443		  NOT_GPL_ONLY, true },
 444		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
 445		  __start___kcrctab_unused_gpl,
 446		  GPL_ONLY, true },
 447#endif
 448	};
 449
 450	module_assert_mutex_or_preempt();
 451
 452	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
 453		return true;
 454
 455	list_for_each_entry_rcu(mod, &modules, list,
 456				lockdep_is_held(&module_mutex)) {
 457		struct symsearch arr[] = {
 458			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
 459			  NOT_GPL_ONLY, false },
 460			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
 461			  mod->gpl_crcs,
 462			  GPL_ONLY, false },
 463			{ mod->gpl_future_syms,
 464			  mod->gpl_future_syms + mod->num_gpl_future_syms,
 465			  mod->gpl_future_crcs,
 466			  WILL_BE_GPL_ONLY, false },
 467#ifdef CONFIG_UNUSED_SYMBOLS
 468			{ mod->unused_syms,
 469			  mod->unused_syms + mod->num_unused_syms,
 470			  mod->unused_crcs,
 471			  NOT_GPL_ONLY, true },
 472			{ mod->unused_gpl_syms,
 473			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
 474			  mod->unused_gpl_crcs,
 475			  GPL_ONLY, true },
 476#endif
 477		};
 478
 479		if (mod->state == MODULE_STATE_UNFORMED)
 480			continue;
 481
 482		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
 483			return true;
 484	}
 485	return false;
 486}
 487
 488struct find_symbol_arg {
 489	/* Input */
 490	const char *name;
 491	bool gplok;
 492	bool warn;
 493
 494	/* Output */
 495	struct module *owner;
 496	const s32 *crc;
 497	const struct kernel_symbol *sym;
 498	enum mod_license license;
 499};
 500
 501static bool check_exported_symbol(const struct symsearch *syms,
 502				  struct module *owner,
 503				  unsigned int symnum, void *data)
 504{
 505	struct find_symbol_arg *fsa = data;
 506
 507	if (!fsa->gplok) {
 508		if (syms->license == GPL_ONLY)
 509			return false;
 510		if (syms->license == WILL_BE_GPL_ONLY && fsa->warn) {
 511			pr_warn("Symbol %s is being used by a non-GPL module, "
 512				"which will not be allowed in the future\n",
 513				fsa->name);
 514		}
 515	}
 516
 517#ifdef CONFIG_UNUSED_SYMBOLS
 518	if (syms->unused && fsa->warn) {
 519		pr_warn("Symbol %s is marked as UNUSED, however this module is "
 520			"using it.\n", fsa->name);
 521		pr_warn("This symbol will go away in the future.\n");
 522		pr_warn("Please evaluate if this is the right api to use and "
 523			"if it really is, submit a report to the linux kernel "
 524			"mailing list together with submitting your code for "
 525			"inclusion.\n");
 526	}
 527#endif
 528
 529	fsa->owner = owner;
 530	fsa->crc = symversion(syms->crcs, symnum);
 531	fsa->sym = &syms->start[symnum];
 532	fsa->license = syms->license;
 533	return true;
 534}
 535
 536static unsigned long kernel_symbol_value(const struct kernel_symbol *sym)
 537{
 538#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
 539	return (unsigned long)offset_to_ptr(&sym->value_offset);
 540#else
 541	return sym->value;
 542#endif
 543}
 544
 545static const char *kernel_symbol_name(const struct kernel_symbol *sym)
 546{
 547#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
 548	return offset_to_ptr(&sym->name_offset);
 549#else
 550	return sym->name;
 551#endif
 552}
 553
 554static const char *kernel_symbol_namespace(const struct kernel_symbol *sym)
 555{
 556#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
 557	if (!sym->namespace_offset)
 558		return NULL;
 559	return offset_to_ptr(&sym->namespace_offset);
 560#else
 561	return sym->namespace;
 562#endif
 563}
 564
 565static int cmp_name(const void *name, const void *sym)
 566{
 567	return strcmp(name, kernel_symbol_name(sym));
 568}
 569
 570static bool find_exported_symbol_in_section(const struct symsearch *syms,
 571					    struct module *owner,
 572					    void *data)
 573{
 574	struct find_symbol_arg *fsa = data;
 575	struct kernel_symbol *sym;
 576
 577	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
 578			sizeof(struct kernel_symbol), cmp_name);
 579
 580	if (sym != NULL && check_exported_symbol(syms, owner,
 581						 sym - syms->start, data))
 582		return true;
 583
 584	return false;
 585}
 586
 587/* Find an exported symbol and return it, along with, (optional) crc and
 588 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
 589static const struct kernel_symbol *find_symbol(const char *name,
 590					struct module **owner,
 591					const s32 **crc,
 592					enum mod_license *license,
 593					bool gplok,
 594					bool warn)
 595{
 596	struct find_symbol_arg fsa;
 597
 598	fsa.name = name;
 599	fsa.gplok = gplok;
 600	fsa.warn = warn;
 601
 602	if (each_symbol_section(find_exported_symbol_in_section, &fsa)) {
 603		if (owner)
 604			*owner = fsa.owner;
 605		if (crc)
 606			*crc = fsa.crc;
 607		if (license)
 608			*license = fsa.license;
 609		return fsa.sym;
 610	}
 611
 612	pr_debug("Failed to find symbol %s\n", name);
 613	return NULL;
 614}
 615
 616/*
 617 * Search for module by name: must hold module_mutex (or preempt disabled
 618 * for read-only access).
 619 */
 620static struct module *find_module_all(const char *name, size_t len,
 621				      bool even_unformed)
 622{
 623	struct module *mod;
 624
 625	module_assert_mutex_or_preempt();
 626
 627	list_for_each_entry_rcu(mod, &modules, list,
 628				lockdep_is_held(&module_mutex)) {
 629		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
 630			continue;
 631		if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
 632			return mod;
 633	}
 634	return NULL;
 635}
 636
 637struct module *find_module(const char *name)
 638{
 639	module_assert_mutex();
 640	return find_module_all(name, strlen(name), false);
 641}
 642EXPORT_SYMBOL_GPL(find_module);
 643
 644#ifdef CONFIG_SMP
 645
 646static inline void __percpu *mod_percpu(struct module *mod)
 647{
 648	return mod->percpu;
 649}
 650
 651static int percpu_modalloc(struct module *mod, struct load_info *info)
 652{
 653	Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
 654	unsigned long align = pcpusec->sh_addralign;
 655
 656	if (!pcpusec->sh_size)
 657		return 0;
 658
 659	if (align > PAGE_SIZE) {
 660		pr_warn("%s: per-cpu alignment %li > %li\n",
 661			mod->name, align, PAGE_SIZE);
 662		align = PAGE_SIZE;
 663	}
 664
 665	mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
 666	if (!mod->percpu) {
 667		pr_warn("%s: Could not allocate %lu bytes percpu data\n",
 668			mod->name, (unsigned long)pcpusec->sh_size);
 669		return -ENOMEM;
 670	}
 671	mod->percpu_size = pcpusec->sh_size;
 672	return 0;
 673}
 674
 675static void percpu_modfree(struct module *mod)
 676{
 677	free_percpu(mod->percpu);
 678}
 679
 680static unsigned int find_pcpusec(struct load_info *info)
 681{
 682	return find_sec(info, ".data..percpu");
 683}
 684
 685static void percpu_modcopy(struct module *mod,
 686			   const void *from, unsigned long size)
 687{
 688	int cpu;
 689
 690	for_each_possible_cpu(cpu)
 691		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
 692}
 693
 694bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 695{
 696	struct module *mod;
 697	unsigned int cpu;
 698
 699	preempt_disable();
 700
 701	list_for_each_entry_rcu(mod, &modules, list) {
 702		if (mod->state == MODULE_STATE_UNFORMED)
 703			continue;
 704		if (!mod->percpu_size)
 705			continue;
 706		for_each_possible_cpu(cpu) {
 707			void *start = per_cpu_ptr(mod->percpu, cpu);
 708			void *va = (void *)addr;
 709
 710			if (va >= start && va < start + mod->percpu_size) {
 711				if (can_addr) {
 712					*can_addr = (unsigned long) (va - start);
 713					*can_addr += (unsigned long)
 714						per_cpu_ptr(mod->percpu,
 715							    get_boot_cpu_id());
 716				}
 717				preempt_enable();
 718				return true;
 719			}
 720		}
 721	}
 722
 723	preempt_enable();
 724	return false;
 725}
 726
 727/**
 728 * is_module_percpu_address - test whether address is from module static percpu
 729 * @addr: address to test
 730 *
 731 * Test whether @addr belongs to module static percpu area.
 732 *
 733 * RETURNS:
 734 * %true if @addr is from module static percpu area
 735 */
 736bool is_module_percpu_address(unsigned long addr)
 737{
 738	return __is_module_percpu_address(addr, NULL);
 739}
 740
 741#else /* ... !CONFIG_SMP */
 742
 743static inline void __percpu *mod_percpu(struct module *mod)
 744{
 745	return NULL;
 746}
 747static int percpu_modalloc(struct module *mod, struct load_info *info)
 748{
 749	/* UP modules shouldn't have this section: ENOMEM isn't quite right */
 750	if (info->sechdrs[info->index.pcpu].sh_size != 0)
 751		return -ENOMEM;
 752	return 0;
 753}
 754static inline void percpu_modfree(struct module *mod)
 755{
 756}
 757static unsigned int find_pcpusec(struct load_info *info)
 758{
 759	return 0;
 760}
 761static inline void percpu_modcopy(struct module *mod,
 762				  const void *from, unsigned long size)
 763{
 764	/* pcpusec should be 0, and size of that section should be 0. */
 765	BUG_ON(size != 0);
 766}
 767bool is_module_percpu_address(unsigned long addr)
 768{
 769	return false;
 770}
 771
 772bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
 773{
 774	return false;
 775}
 776
 777#endif /* CONFIG_SMP */
 778
 779#define MODINFO_ATTR(field)	\
 780static void setup_modinfo_##field(struct module *mod, const char *s)  \
 781{                                                                     \
 782	mod->field = kstrdup(s, GFP_KERNEL);                          \
 783}                                                                     \
 784static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
 785			struct module_kobject *mk, char *buffer)      \
 786{                                                                     \
 787	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);  \
 788}                                                                     \
 789static int modinfo_##field##_exists(struct module *mod)               \
 790{                                                                     \
 791	return mod->field != NULL;                                    \
 792}                                                                     \
 793static void free_modinfo_##field(struct module *mod)                  \
 794{                                                                     \
 795	kfree(mod->field);                                            \
 796	mod->field = NULL;                                            \
 797}                                                                     \
 798static struct module_attribute modinfo_##field = {                    \
 799	.attr = { .name = __stringify(field), .mode = 0444 },         \
 800	.show = show_modinfo_##field,                                 \
 801	.setup = setup_modinfo_##field,                               \
 802	.test = modinfo_##field##_exists,                             \
 803	.free = free_modinfo_##field,                                 \
 804};
 805
 806MODINFO_ATTR(version);
 807MODINFO_ATTR(srcversion);
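/*
 * The two instantiations above become the sysfs attributes
 * /sys/module/<name>/version and /sys/module/<name>/srcversion, each backed
 * by the kstrdup()ed modinfo string captured at load time (the attribute is
 * only created when the .ko actually carries that MODULE_INFO string).
 */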
 808
 809static char last_unloaded_module[MODULE_NAME_LEN+1];
 810
 811#ifdef CONFIG_MODULE_UNLOAD
 812
 813EXPORT_TRACEPOINT_SYMBOL(module_get);
 814
  815	/* MODULE_REF_BASE is the base reference count taken by the module loader. */
 816#define MODULE_REF_BASE	1
 817
 818/* Init the unload section of the module. */
 819static int module_unload_init(struct module *mod)
 820{
 821	/*
 822	 * Initialize reference counter to MODULE_REF_BASE.
 823	 * refcnt == 0 means module is going.
 824	 */
 825	atomic_set(&mod->refcnt, MODULE_REF_BASE);
 826
 827	INIT_LIST_HEAD(&mod->source_list);
 828	INIT_LIST_HEAD(&mod->target_list);
 829
 830	/* Hold reference count during initialization. */
 831	atomic_inc(&mod->refcnt);
 832
 833	return 0;
 834}
 835
 836/* Does a already use b? */
 837static int already_uses(struct module *a, struct module *b)
 838{
 839	struct module_use *use;
 840
 841	list_for_each_entry(use, &b->source_list, source_list) {
 842		if (use->source == a) {
 843			pr_debug("%s uses %s!\n", a->name, b->name);
 844			return 1;
 845		}
 846	}
 847	pr_debug("%s does not use %s!\n", a->name, b->name);
 848	return 0;
 849}
 850
 851/*
 852 * Module a uses b
 853 *  - we add 'a' as a "source", 'b' as a "target" of module use
 854 *  - the module_use is added to the list of 'b' sources (so
 855 *    'b' can walk the list to see who sourced them), and of 'a'
 856 *    targets (so 'a' can see what modules it targets).
 857 */
 858static int add_module_usage(struct module *a, struct module *b)
 859{
 860	struct module_use *use;
 861
 862	pr_debug("Allocating new usage for %s.\n", a->name);
 863	use = kmalloc(sizeof(*use), GFP_ATOMIC);
 864	if (!use)
 865		return -ENOMEM;
 866
 867	use->source = a;
 868	use->target = b;
 869	list_add(&use->source_list, &b->source_list);
 870	list_add(&use->target_list, &a->target_list);
 871	return 0;
 872}
 873
 874/* Module a uses b: caller needs module_mutex() */
 875static int ref_module(struct module *a, struct module *b)
 876{
 877	int err;
 878
 879	if (b == NULL || already_uses(a, b))
 880		return 0;
 881
 882	/* If module isn't available, we fail. */
 883	err = strong_try_module_get(b);
 884	if (err)
 885		return err;
 886
 887	err = add_module_usage(a, b);
 888	if (err) {
 889		module_put(b);
 890		return err;
 891	}
 892	return 0;
 893}
 894
 895/* Clear the unload stuff of the module. */
 896static void module_unload_free(struct module *mod)
 897{
 898	struct module_use *use, *tmp;
 899
 900	mutex_lock(&module_mutex);
 901	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
 902		struct module *i = use->target;
 903		pr_debug("%s unusing %s\n", mod->name, i->name);
 904		module_put(i);
 905		list_del(&use->source_list);
 906		list_del(&use->target_list);
 907		kfree(use);
 908	}
 909	mutex_unlock(&module_mutex);
 910}
 911
 912#ifdef CONFIG_MODULE_FORCE_UNLOAD
 913static inline int try_force_unload(unsigned int flags)
 914{
 915	int ret = (flags & O_TRUNC);
 916	if (ret)
 917		add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
 918	return ret;
 919}
 920#else
 921static inline int try_force_unload(unsigned int flags)
 922{
 923	return 0;
 924}
 925#endif /* CONFIG_MODULE_FORCE_UNLOAD */
 926
 927/* Try to release refcount of module, 0 means success. */
 928static int try_release_module_ref(struct module *mod)
 929{
 930	int ret;
 931
 932	/* Try to decrement refcnt which we set at loading */
 933	ret = atomic_sub_return(MODULE_REF_BASE, &mod->refcnt);
 934	BUG_ON(ret < 0);
 935	if (ret)
 936		/* Someone can put this right now, recover with checking */
 937		ret = atomic_add_unless(&mod->refcnt, MODULE_REF_BASE, 0);
 938
 939	return ret;
 940}
 941
 942static int try_stop_module(struct module *mod, int flags, int *forced)
 943{
 944	/* If it's not unused, quit unless we're forcing. */
 945	if (try_release_module_ref(mod) != 0) {
 946		*forced = try_force_unload(flags);
 947		if (!(*forced))
 948			return -EWOULDBLOCK;
 949	}
 950
 951	/* Mark it as dying. */
 952	mod->state = MODULE_STATE_GOING;
 953
 954	return 0;
 955}
 956
 957/**
 958 * module_refcount - return the refcount or -1 if unloading
 959 *
 960 * @mod:	the module we're checking
 961 *
 962 * Returns:
 963 *	-1 if the module is in the process of unloading
 964 *	otherwise the number of references in the kernel to the module
 965 */
 966int module_refcount(struct module *mod)
 967{
 968	return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
 969}
 970EXPORT_SYMBOL(module_refcount);
 971
 972/* This exists whether we can unload or not */
 973static void free_module(struct module *mod);
 974
 975SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 976		unsigned int, flags)
 977{
 978	struct module *mod;
 979	char name[MODULE_NAME_LEN];
 980	int ret, forced = 0;
 981
 982	if (!capable(CAP_SYS_MODULE) || modules_disabled)
 983		return -EPERM;
 984
 985	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
 986		return -EFAULT;
 987	name[MODULE_NAME_LEN-1] = '\0';
 988
 989	audit_log_kern_module(name);
 990
 991	if (mutex_lock_interruptible(&module_mutex) != 0)
 992		return -EINTR;
 993
 994	mod = find_module(name);
 995	if (!mod) {
 996		ret = -ENOENT;
 997		goto out;
 998	}
 999
1000	if (!list_empty(&mod->source_list)) {
1001		/* Other modules depend on us: get rid of them first. */
1002		ret = -EWOULDBLOCK;
1003		goto out;
1004	}
1005
1006	/* Doing init or already dying? */
1007	if (mod->state != MODULE_STATE_LIVE) {
1008		/* FIXME: if (force), slam module count damn the torpedoes */
1009		pr_debug("%s already dying\n", mod->name);
1010		ret = -EBUSY;
1011		goto out;
1012	}
1013
1014	/* If it has an init func, it must have an exit func to unload */
1015	if (mod->init && !mod->exit) {
1016		forced = try_force_unload(flags);
1017		if (!forced) {
1018			/* This module can't be removed */
1019			ret = -EBUSY;
1020			goto out;
1021		}
1022	}
1023
1024	/* Stop the machine so refcounts can't move and disable module. */
1025	ret = try_stop_module(mod, flags, &forced);
1026	if (ret != 0)
1027		goto out;
1028
1029	mutex_unlock(&module_mutex);
1030	/* Final destruction now no one is using it. */
1031	if (mod->exit != NULL)
1032		mod->exit();
1033	blocking_notifier_call_chain(&module_notify_list,
1034				     MODULE_STATE_GOING, mod);
1035	klp_module_going(mod);
1036	ftrace_release_mod(mod);
1037
1038	async_synchronize_full();
1039
1040	/* Store the name of the last unloaded module for diagnostic purposes */
1041	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
1042
1043	free_module(mod);
1044	/* someone could wait for the module in add_unformed_module() */
1045	wake_up_all(&module_wq);
1046	return 0;
1047out:
1048	mutex_unlock(&module_mutex);
1049	return ret;
1050}
1051
1052static inline void print_unload_info(struct seq_file *m, struct module *mod)
1053{
1054	struct module_use *use;
1055	int printed_something = 0;
1056
1057	seq_printf(m, " %i ", module_refcount(mod));
1058
1059	/*
1060	 * Always include a trailing , so userspace can differentiate
1061	 * between this and the old multi-field proc format.
1062	 */
1063	list_for_each_entry(use, &mod->source_list, source_list) {
1064		printed_something = 1;
1065		seq_printf(m, "%s,", use->source->name);
1066	}
1067
1068	if (mod->init != NULL && mod->exit == NULL) {
1069		printed_something = 1;
1070		seq_puts(m, "[permanent],");
1071	}
1072
1073	if (!printed_something)
1074		seq_puts(m, "-");
1075}
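/*
 * Example "deps" field (illustrative): a module with two users prints
 * "user_a,user_b,"; the trailing comma is what distinguishes the new
 * single-field format, while "-" means no users and no [permanent] marker.
 */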
1076
1077void __symbol_put(const char *symbol)
1078{
1079	struct module *owner;
1080
1081	preempt_disable();
1082	if (!find_symbol(symbol, &owner, NULL, NULL, true, false))
1083		BUG();
1084	module_put(owner);
1085	preempt_enable();
1086}
1087EXPORT_SYMBOL(__symbol_put);
1088
1089/* Note this assumes addr is a function, which it currently always is. */
1090void symbol_put_addr(void *addr)
1091{
1092	struct module *modaddr;
1093	unsigned long a = (unsigned long)dereference_function_descriptor(addr);
1094
1095	if (core_kernel_text(a))
1096		return;
1097
1098	/*
1099	 * Even though we hold a reference on the module; we still need to
1100	 * disable preemption in order to safely traverse the data structure.
1101	 */
1102	preempt_disable();
1103	modaddr = __module_text_address(a);
1104	BUG_ON(!modaddr);
1105	module_put(modaddr);
1106	preempt_enable();
1107}
1108EXPORT_SYMBOL_GPL(symbol_put_addr);
1109
1110static ssize_t show_refcnt(struct module_attribute *mattr,
1111			   struct module_kobject *mk, char *buffer)
1112{
1113	return sprintf(buffer, "%i\n", module_refcount(mk->mod));
1114}
1115
1116static struct module_attribute modinfo_refcnt =
1117	__ATTR(refcnt, 0444, show_refcnt, NULL);
1118
1119void __module_get(struct module *module)
1120{
1121	if (module) {
1122		preempt_disable();
1123		atomic_inc(&module->refcnt);
1124		trace_module_get(module, _RET_IP_);
1125		preempt_enable();
1126	}
1127}
1128EXPORT_SYMBOL(__module_get);
1129
1130bool try_module_get(struct module *module)
1131{
1132	bool ret = true;
1133
1134	if (module) {
1135		preempt_disable();
1136		/* Note: here, we can fail to get a reference */
1137		if (likely(module_is_live(module) &&
1138			   atomic_inc_not_zero(&module->refcnt) != 0))
1139			trace_module_get(module, _RET_IP_);
1140		else
1141			ret = false;
1142
1143		preempt_enable();
1144	}
1145	return ret;
1146}
1147EXPORT_SYMBOL(try_module_get);
1148
1149void module_put(struct module *module)
1150{
1151	int ret;
1152
1153	if (module) {
1154		preempt_disable();
1155		ret = atomic_dec_if_positive(&module->refcnt);
1156		WARN_ON(ret < 0);	/* Failed to put refcount */
1157		trace_module_put(module, _RET_IP_);
1158		preempt_enable();
1159	}
1160}
1161EXPORT_SYMBOL(module_put);
1162
1163#else /* !CONFIG_MODULE_UNLOAD */
1164static inline void print_unload_info(struct seq_file *m, struct module *mod)
1165{
1166	/* We don't know the usage count, or what modules are using. */
1167	seq_puts(m, " - -");
1168}
1169
1170static inline void module_unload_free(struct module *mod)
1171{
1172}
1173
1174static int ref_module(struct module *a, struct module *b)
1175{
1176	return strong_try_module_get(b);
1177}
1178
1179static inline int module_unload_init(struct module *mod)
1180{
1181	return 0;
1182}
1183#endif /* CONFIG_MODULE_UNLOAD */
1184
1185static size_t module_flags_taint(struct module *mod, char *buf)
1186{
1187	size_t l = 0;
1188	int i;
1189
1190	for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
1191		if (taint_flags[i].module && test_bit(i, &mod->taints))
1192			buf[l++] = taint_flags[i].c_true;
1193	}
1194
1195	return l;
1196}
1197
1198static ssize_t show_initstate(struct module_attribute *mattr,
1199			      struct module_kobject *mk, char *buffer)
1200{
1201	const char *state = "unknown";
1202
1203	switch (mk->mod->state) {
1204	case MODULE_STATE_LIVE:
1205		state = "live";
1206		break;
1207	case MODULE_STATE_COMING:
1208		state = "coming";
1209		break;
1210	case MODULE_STATE_GOING:
1211		state = "going";
1212		break;
1213	default:
1214		BUG();
1215	}
1216	return sprintf(buffer, "%s\n", state);
1217}
1218
1219static struct module_attribute modinfo_initstate =
1220	__ATTR(initstate, 0444, show_initstate, NULL);
1221
1222static ssize_t store_uevent(struct module_attribute *mattr,
1223			    struct module_kobject *mk,
1224			    const char *buffer, size_t count)
1225{
1226	int rc;
1227
1228	rc = kobject_synth_uevent(&mk->kobj, buffer, count);
1229	return rc ? rc : count;
1230}
1231
1232struct module_attribute module_uevent =
1233	__ATTR(uevent, 0200, NULL, store_uevent);
1234
1235static ssize_t show_coresize(struct module_attribute *mattr,
1236			     struct module_kobject *mk, char *buffer)
1237{
1238	return sprintf(buffer, "%u\n", mk->mod->core_layout.size);
1239}
1240
1241static struct module_attribute modinfo_coresize =
1242	__ATTR(coresize, 0444, show_coresize, NULL);
1243
1244static ssize_t show_initsize(struct module_attribute *mattr,
1245			     struct module_kobject *mk, char *buffer)
1246{
1247	return sprintf(buffer, "%u\n", mk->mod->init_layout.size);
1248}
1249
1250static struct module_attribute modinfo_initsize =
1251	__ATTR(initsize, 0444, show_initsize, NULL);
1252
1253static ssize_t show_taint(struct module_attribute *mattr,
1254			  struct module_kobject *mk, char *buffer)
1255{
1256	size_t l;
1257
1258	l = module_flags_taint(mk->mod, buffer);
1259	buffer[l++] = '\n';
1260	return l;
1261}
1262
1263static struct module_attribute modinfo_taint =
1264	__ATTR(taint, 0444, show_taint, NULL);
1265
1266static struct module_attribute *modinfo_attrs[] = {
1267	&module_uevent,
1268	&modinfo_version,
1269	&modinfo_srcversion,
1270	&modinfo_initstate,
1271	&modinfo_coresize,
1272	&modinfo_initsize,
1273	&modinfo_taint,
1274#ifdef CONFIG_MODULE_UNLOAD
1275	&modinfo_refcnt,
1276#endif
1277	NULL,
1278};
1279
1280static const char vermagic[] = VERMAGIC_STRING;
1281
1282static int try_to_force_load(struct module *mod, const char *reason)
1283{
1284#ifdef CONFIG_MODULE_FORCE_LOAD
1285	if (!test_taint(TAINT_FORCED_MODULE))
1286		pr_warn("%s: %s: kernel tainted.\n", mod->name, reason);
1287	add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
1288	return 0;
1289#else
1290	return -ENOEXEC;
1291#endif
1292}
1293
1294#ifdef CONFIG_MODVERSIONS
1295
1296static u32 resolve_rel_crc(const s32 *crc)
1297{
1298	return *(u32 *)((void *)crc + *crc);
1299}
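/*
 * With CONFIG_MODULE_REL_CRCS a __kcrctab entry holds not the CRC itself
 * but a 32-bit offset from the entry's own location to the CRC value; the
 * pointer arithmetic above undoes that.  For illustration: if crc is at
 * address A and *crc == 0x100, the real CRC is read from A + 0x100.
 */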
1300
1301static int check_version(const struct load_info *info,
1302			 const char *symname,
1303			 struct module *mod,
1304			 const s32 *crc)
1305{
1306	Elf_Shdr *sechdrs = info->sechdrs;
1307	unsigned int versindex = info->index.vers;
1308	unsigned int i, num_versions;
1309	struct modversion_info *versions;
1310
1311	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
1312	if (!crc)
1313		return 1;
1314
1315	/* No versions at all?  modprobe --force does this. */
1316	if (versindex == 0)
1317		return try_to_force_load(mod, symname) == 0;
1318
1319	versions = (void *) sechdrs[versindex].sh_addr;
1320	num_versions = sechdrs[versindex].sh_size
1321		/ sizeof(struct modversion_info);
1322
1323	for (i = 0; i < num_versions; i++) {
1324		u32 crcval;
1325
1326		if (strcmp(versions[i].name, symname) != 0)
1327			continue;
1328
1329		if (IS_ENABLED(CONFIG_MODULE_REL_CRCS))
1330			crcval = resolve_rel_crc(crc);
1331		else
1332			crcval = *crc;
1333		if (versions[i].crc == crcval)
1334			return 1;
1335		pr_debug("Found checksum %X vs module %lX\n",
1336			 crcval, versions[i].crc);
1337		goto bad_version;
1338	}
1339
1340	/* Broken toolchain. Warn once, then let it go. */
1341	pr_warn_once("%s: no symbol version for %s\n", info->name, symname);
1342	return 1;
1343
1344bad_version:
1345	pr_warn("%s: disagrees about version of symbol %s\n",
1346	       info->name, symname);
1347	return 0;
1348}
1349
1350static inline int check_modstruct_version(const struct load_info *info,
1351					  struct module *mod)
1352{
1353	const s32 *crc;
1354
1355	/*
1356	 * Since this should be found in the kernel (which can't be removed), no
1357	 * locking is necessary -- use preempt_disable() to placate lockdep.
1358	 */
1359	preempt_disable();
1360	if (!find_symbol("module_layout", NULL, &crc, NULL, true, false)) {
1361		preempt_enable();
1362		BUG();
1363	}
1364	preempt_enable();
1365	return check_version(info, "module_layout", mod, crc);
1366}
1367
1368/* First part is kernel version, which we ignore if module has crcs. */
1369static inline int same_magic(const char *amagic, const char *bmagic,
1370			     bool has_crcs)
1371{
1372	if (has_crcs) {
1373		amagic += strcspn(amagic, " ");
1374		bmagic += strcspn(bmagic, " ");
1375	}
1376	return strcmp(amagic, bmagic) == 0;
1377}
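/*
 * For illustration, a vermagic string looks roughly like
 * "5.4.0 SMP mod_unload modversions "; when both sides carry CRCs the
 * leading release string (up to the first space) is skipped, so only the
 * configuration-dependent tail has to match exactly.
 */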
1378#else
1379static inline int check_version(const struct load_info *info,
1380				const char *symname,
1381				struct module *mod,
1382				const s32 *crc)
1383{
1384	return 1;
1385}
1386
1387static inline int check_modstruct_version(const struct load_info *info,
1388					  struct module *mod)
1389{
1390	return 1;
1391}
1392
1393static inline int same_magic(const char *amagic, const char *bmagic,
1394			     bool has_crcs)
1395{
1396	return strcmp(amagic, bmagic) == 0;
1397}
1398#endif /* CONFIG_MODVERSIONS */
1399
1400static char *get_modinfo(const struct load_info *info, const char *tag);
1401static char *get_next_modinfo(const struct load_info *info, const char *tag,
1402			      char *prev);
1403
1404static int verify_namespace_is_imported(const struct load_info *info,
1405					const struct kernel_symbol *sym,
1406					struct module *mod)
1407{
1408	const char *namespace;
1409	char *imported_namespace;
1410
1411	namespace = kernel_symbol_namespace(sym);
1412	if (namespace && namespace[0]) {
1413		imported_namespace = get_modinfo(info, "import_ns");
1414		while (imported_namespace) {
1415			if (strcmp(namespace, imported_namespace) == 0)
1416				return 0;
1417			imported_namespace = get_next_modinfo(
1418				info, "import_ns", imported_namespace);
1419		}
1420#ifdef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
1421		pr_warn(
1422#else
1423		pr_err(
1424#endif
1425			"%s: module uses symbol (%s) from namespace %s, but does not import it.\n",
1426			mod->name, kernel_symbol_name(sym), namespace);
1427#ifndef CONFIG_MODULE_ALLOW_MISSING_NAMESPACE_IMPORTS
1428		return -EINVAL;
1429#endif
1430	}
1431	return 0;
1432}
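/*
 * Symbol namespaces: an exporter tags symbols with EXPORT_SYMBOL_NS(), and
 * a consumer declares MODULE_IMPORT_NS(), which ends up as an
 * "import_ns=<namespace>" tag in .modinfo -- that is the list walked above.
 */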
1433
1434static bool inherit_taint(struct module *mod, struct module *owner)
1435{
1436	if (!owner || !test_bit(TAINT_PROPRIETARY_MODULE, &owner->taints))
1437		return true;
1438
1439	if (mod->using_gplonly_symbols) {
1440		pr_err("%s: module using GPL-only symbols uses symbols from proprietary module %s.\n",
1441			mod->name, owner->name);
1442		return false;
1443	}
1444
1445	if (!test_bit(TAINT_PROPRIETARY_MODULE, &mod->taints)) {
1446		pr_warn("%s: module uses symbols from proprietary module %s, inheriting taint.\n",
1447			mod->name, owner->name);
1448		set_bit(TAINT_PROPRIETARY_MODULE, &mod->taints);
1449	}
1450	return true;
1451}
1452
1453/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
1454static const struct kernel_symbol *resolve_symbol(struct module *mod,
1455						  const struct load_info *info,
1456						  const char *name,
1457						  char ownername[])
1458{
1459	struct module *owner;
1460	const struct kernel_symbol *sym;
1461	const s32 *crc;
1462	enum mod_license license;
1463	int err;
1464
1465	/*
1466	 * The module_mutex should not be a heavily contended lock;
1467	 * if we get the occasional sleep here, we'll go an extra iteration
1468	 * in the wait_event_interruptible(), which is harmless.
1469	 */
1470	sched_annotate_sleep();
1471	mutex_lock(&module_mutex);
1472	sym = find_symbol(name, &owner, &crc, &license,
1473			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1474	if (!sym)
1475		goto unlock;
1476
1477	if (license == GPL_ONLY)
1478		mod->using_gplonly_symbols = true;
1479
1480	if (!inherit_taint(mod, owner)) {
1481		sym = NULL;
1482		goto getname;
1483	}
1484
1485	if (!check_version(info, name, mod, crc)) {
1486		sym = ERR_PTR(-EINVAL);
1487		goto getname;
1488	}
1489
1490	err = verify_namespace_is_imported(info, sym, mod);
1491	if (err) {
1492		sym = ERR_PTR(err);
1493		goto getname;
1494	}
1495
1496	err = ref_module(mod, owner);
1497	if (err) {
1498		sym = ERR_PTR(err);
1499		goto getname;
1500	}
1501
1502getname:
1503	/* We must make a copy under the lock if we failed to get a reference. */
1504	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
1505unlock:
1506	mutex_unlock(&module_mutex);
1507	return sym;
1508}
1509
1510static const struct kernel_symbol *
1511resolve_symbol_wait(struct module *mod,
1512		    const struct load_info *info,
1513		    const char *name)
1514{
1515	const struct kernel_symbol *ksym;
1516	char owner[MODULE_NAME_LEN];
1517
1518	if (wait_event_interruptible_timeout(module_wq,
1519			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
1520			|| PTR_ERR(ksym) != -EBUSY,
1521					     30 * HZ) <= 0) {
1522		pr_warn("%s: gave up waiting for init of module %s.\n",
1523			mod->name, owner);
1524	}
1525	return ksym;
1526}
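/*
 * -EBUSY here means the module that exports the symbol is still running
 * its init; the wait above retries each time module_wq is woken by a
 * module state change and gives up after 30 seconds.
 */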
1527
1528/*
1529 * /sys/module/foo/sections stuff
1530 * J. Corbet <corbet@lwn.net>
1531 */
1532#ifdef CONFIG_SYSFS
1533
1534#ifdef CONFIG_KALLSYMS
1535static inline bool sect_empty(const Elf_Shdr *sect)
1536{
1537	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
1538}
1539
1540struct module_sect_attr {
1541	struct bin_attribute battr;
1542	unsigned long address;
1543};
1544
1545struct module_sect_attrs {
1546	struct attribute_group grp;
1547	unsigned int nsections;
1548	struct module_sect_attr attrs[];
1549};
1550
1551#define MODULE_SECT_READ_SIZE (3 /* "0x", "\n" */ + (BITS_PER_LONG / 4))
1552static ssize_t module_sect_read(struct file *file, struct kobject *kobj,
1553				struct bin_attribute *battr,
1554				char *buf, loff_t pos, size_t count)
1555{
1556	struct module_sect_attr *sattr =
1557		container_of(battr, struct module_sect_attr, battr);
1558	char bounce[MODULE_SECT_READ_SIZE + 1];
1559	size_t wrote;
1560
1561	if (pos != 0)
1562		return -EINVAL;
1563
1564	/*
1565	 * Since we're a binary read handler, we must account for the
1566	 * trailing NUL byte that sprintf will write: if "buf" is
1567	 * too small to hold the NUL, or the NUL is exactly the last
1568	 * byte, the read will look like it got truncated by one byte.
1569	 * Since there is no way to ask sprintf nicely to not write
1570	 * the NUL, we have to use a bounce buffer.
1571	 */
1572	wrote = scnprintf(bounce, sizeof(bounce), "0x%px\n",
1573			 kallsyms_show_value(file->f_cred)
1574				? (void *)sattr->address : NULL);
1575	count = min(count, wrote);
1576	memcpy(buf, bounce, count);
1577
1578	return count;
1579}
1580
1581static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
1582{
1583	unsigned int section;
1584
1585	for (section = 0; section < sect_attrs->nsections; section++)
1586		kfree(sect_attrs->attrs[section].battr.attr.name);
1587	kfree(sect_attrs);
1588}
1589
1590static void add_sect_attrs(struct module *mod, const struct load_info *info)
1591{
1592	unsigned int nloaded = 0, i, size[2];
1593	struct module_sect_attrs *sect_attrs;
1594	struct module_sect_attr *sattr;
1595	struct bin_attribute **gattr;
1596
1597	/* Count loaded sections and allocate structures */
1598	for (i = 0; i < info->hdr->e_shnum; i++)
1599		if (!sect_empty(&info->sechdrs[i]))
1600			nloaded++;
1601	size[0] = ALIGN(struct_size(sect_attrs, attrs, nloaded),
1602			sizeof(sect_attrs->grp.bin_attrs[0]));
1603	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.bin_attrs[0]);
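	/*
	 * A single allocation backs both pieces: the module_sect_attrs
	 * struct (with its flexible attrs[] array) fills the first size[0]
	 * bytes, and the NULL-terminated array of bin_attribute pointers
	 * used by the group follows at offset size[0].
	 */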
1604	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
1605	if (sect_attrs == NULL)
1606		return;
1607
1608	/* Setup section attributes. */
1609	sect_attrs->grp.name = "sections";
1610	sect_attrs->grp.bin_attrs = (void *)sect_attrs + size[0];
1611
1612	sect_attrs->nsections = 0;
1613	sattr = &sect_attrs->attrs[0];
1614	gattr = &sect_attrs->grp.bin_attrs[0];
1615	for (i = 0; i < info->hdr->e_shnum; i++) {
1616		Elf_Shdr *sec = &info->sechdrs[i];
1617		if (sect_empty(sec))
1618			continue;
1619		sysfs_bin_attr_init(&sattr->battr);
1620		sattr->address = sec->sh_addr;
1621		sattr->battr.attr.name =
1622			kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL);
1623		if (sattr->battr.attr.name == NULL)
1624			goto out;
1625		sect_attrs->nsections++;
1626		sattr->battr.read = module_sect_read;
1627		sattr->battr.size = MODULE_SECT_READ_SIZE;
1628		sattr->battr.attr.mode = 0400;
1629		*(gattr++) = &(sattr++)->battr;
1630	}
1631	*gattr = NULL;
1632
1633	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
1634		goto out;
1635
1636	mod->sect_attrs = sect_attrs;
1637	return;
1638  out:
1639	free_sect_attrs(sect_attrs);
1640}
1641
1642static void remove_sect_attrs(struct module *mod)
1643{
1644	if (mod->sect_attrs) {
1645		sysfs_remove_group(&mod->mkobj.kobj,
1646				   &mod->sect_attrs->grp);
1647		/* We are positive that no one is using any sect attrs
1648		 * at this point.  Deallocate immediately. */
1649		free_sect_attrs(mod->sect_attrs);
1650		mod->sect_attrs = NULL;
1651	}
1652}
1653
1654/*
1655 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
1656 */
1657
1658struct module_notes_attrs {
1659	struct kobject *dir;
1660	unsigned int notes;
1661	struct bin_attribute attrs[];
1662};
1663
1664static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
1665				 struct bin_attribute *bin_attr,
1666				 char *buf, loff_t pos, size_t count)
1667{
1668	/*
1669	 * The caller checked the pos and count against our size.
1670	 */
1671	memcpy(buf, bin_attr->private + pos, count);
1672	return count;
1673}
1674
1675static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1676			     unsigned int i)
1677{
1678	if (notes_attrs->dir) {
1679		while (i-- > 0)
1680			sysfs_remove_bin_file(notes_attrs->dir,
1681					      &notes_attrs->attrs[i]);
1682		kobject_put(notes_attrs->dir);
1683	}
1684	kfree(notes_attrs);
1685}
1686
1687static void add_notes_attrs(struct module *mod, const struct load_info *info)
1688{
1689	unsigned int notes, loaded, i;
1690	struct module_notes_attrs *notes_attrs;
1691	struct bin_attribute *nattr;
1692
1693	/* failed to create section attributes, so can't create notes */
1694	if (!mod->sect_attrs)
1695		return;
1696
1697	/* Count notes sections and allocate structures.  */
1698	notes = 0;
1699	for (i = 0; i < info->hdr->e_shnum; i++)
1700		if (!sect_empty(&info->sechdrs[i]) &&
1701		    (info->sechdrs[i].sh_type == SHT_NOTE))
1702			++notes;
1703
1704	if (notes == 0)
1705		return;
1706
1707	notes_attrs = kzalloc(struct_size(notes_attrs, attrs, notes),
1708			      GFP_KERNEL);
1709	if (notes_attrs == NULL)
1710		return;
1711
1712	notes_attrs->notes = notes;
1713	nattr = &notes_attrs->attrs[0];
1714	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
1715		if (sect_empty(&info->sechdrs[i]))
1716			continue;
1717		if (info->sechdrs[i].sh_type == SHT_NOTE) {
1718			sysfs_bin_attr_init(nattr);
1719			nattr->attr.name = mod->sect_attrs->attrs[loaded].battr.attr.name;
1720			nattr->attr.mode = S_IRUGO;
1721			nattr->size = info->sechdrs[i].sh_size;
1722			nattr->private = (void *) info->sechdrs[i].sh_addr;
1723			nattr->read = module_notes_read;
1724			++nattr;
1725		}
1726		++loaded;
1727	}
1728
1729	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
1730	if (!notes_attrs->dir)
1731		goto out;
1732
1733	for (i = 0; i < notes; ++i)
1734		if (sysfs_create_bin_file(notes_attrs->dir,
1735					  &notes_attrs->attrs[i]))
1736			goto out;
1737
1738	mod->notes_attrs = notes_attrs;
1739	return;
1740
1741  out:
1742	free_notes_attrs(notes_attrs, i);
1743}
1744
1745static void remove_notes_attrs(struct module *mod)
1746{
1747	if (mod->notes_attrs)
1748		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
1749}
1750
1751#else
1752
1753static inline void add_sect_attrs(struct module *mod,
1754				  const struct load_info *info)
1755{
1756}
1757
1758static inline void remove_sect_attrs(struct module *mod)
1759{
1760}
1761
1762static inline void add_notes_attrs(struct module *mod,
1763				   const struct load_info *info)
1764{
1765}
1766
1767static inline void remove_notes_attrs(struct module *mod)
1768{
1769}
1770#endif /* CONFIG_KALLSYMS */
1771
1772static void del_usage_links(struct module *mod)
1773{
1774#ifdef CONFIG_MODULE_UNLOAD
1775	struct module_use *use;
1776
1777	mutex_lock(&module_mutex);
1778	list_for_each_entry(use, &mod->target_list, target_list)
1779		sysfs_remove_link(use->target->holders_dir, mod->name);
1780	mutex_unlock(&module_mutex);
1781#endif
1782}
1783
1784static int add_usage_links(struct module *mod)
1785{
1786	int ret = 0;
1787#ifdef CONFIG_MODULE_UNLOAD
1788	struct module_use *use;
1789
1790	mutex_lock(&module_mutex);
1791	list_for_each_entry(use, &mod->target_list, target_list) {
1792		ret = sysfs_create_link(use->target->holders_dir,
1793					&mod->mkobj.kobj, mod->name);
1794		if (ret)
1795			break;
1796	}
1797	mutex_unlock(&module_mutex);
1798	if (ret)
1799		del_usage_links(mod);
1800#endif
1801	return ret;
1802}
1803
1804static void module_remove_modinfo_attrs(struct module *mod, int end);
1805
1806static int module_add_modinfo_attrs(struct module *mod)
1807{
1808	struct module_attribute *attr;
1809	struct module_attribute *temp_attr;
1810	int error = 0;
1811	int i;
1812
1813	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
1814					(ARRAY_SIZE(modinfo_attrs) + 1)),
1815					GFP_KERNEL);
1816	if (!mod->modinfo_attrs)
1817		return -ENOMEM;
1818
1819	temp_attr = mod->modinfo_attrs;
1820	for (i = 0; (attr = modinfo_attrs[i]); i++) {
1821		if (!attr->test || attr->test(mod)) {
1822			memcpy(temp_attr, attr, sizeof(*temp_attr));
1823			sysfs_attr_init(&temp_attr->attr);
1824			error = sysfs_create_file(&mod->mkobj.kobj,
1825					&temp_attr->attr);
1826			if (error)
1827				goto error_out;
1828			++temp_attr;
1829		}
1830	}
1831
1832	return 0;
1833
1834error_out:
1835	if (i > 0)
1836		module_remove_modinfo_attrs(mod, --i);
1837	else
1838		kfree(mod->modinfo_attrs);
1839	return error;
1840}
1841
1842static void module_remove_modinfo_attrs(struct module *mod, int end)
1843{
1844	struct module_attribute *attr;
1845	int i;
1846
1847	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
1848		if (end >= 0 && i > end)
1849			break;
1850		/* pick a field to test for end of list */
1851		if (!attr->attr.name)
1852			break;
1853		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
1854		if (attr->free)
1855			attr->free(mod);
1856	}
1857	kfree(mod->modinfo_attrs);
1858}
1859
1860static void mod_kobject_put(struct module *mod)
1861{
1862	DECLARE_COMPLETION_ONSTACK(c);
1863	mod->mkobj.kobj_completion = &c;
1864	kobject_put(&mod->mkobj.kobj);
1865	wait_for_completion(&c);
1866}
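/*
 * The on-stack completion is completed by the module kobject's release
 * handler, so this does not return until the final reference is dropped:
 * the kobject is embedded in struct module, which must not be freed while
 * sysfs can still reach it.
 */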
1867
1868static int mod_sysfs_init(struct module *mod)
1869{
1870	int err;
1871	struct kobject *kobj;
1872
1873	if (!module_sysfs_initialized) {
1874		pr_err("%s: module sysfs not initialized\n", mod->name);
1875		err = -EINVAL;
1876		goto out;
1877	}
1878
1879	kobj = kset_find_obj(module_kset, mod->name);
1880	if (kobj) {
1881		pr_err("%s: module is already loaded\n", mod->name);
1882		kobject_put(kobj);
1883		err = -EINVAL;
1884		goto out;
1885	}
1886
1887	mod->mkobj.mod = mod;
1888
1889	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
1890	mod->mkobj.kobj.kset = module_kset;
1891	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
1892				   "%s", mod->name);
1893	if (err)
1894		mod_kobject_put(mod);
1895
1896	/* delay uevent until full sysfs population */
1897out:
1898	return err;
1899}
1900
1901static int mod_sysfs_setup(struct module *mod,
1902			   const struct load_info *info,
1903			   struct kernel_param *kparam,
1904			   unsigned int num_params)
1905{
1906	int err;
1907
1908	err = mod_sysfs_init(mod);
1909	if (err)
1910		goto out;
1911
1912	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
1913	if (!mod->holders_dir) {
1914		err = -ENOMEM;
1915		goto out_unreg;
1916	}
1917
1918	err = module_param_sysfs_setup(mod, kparam, num_params);
1919	if (err)
1920		goto out_unreg_holders;
1921
1922	err = module_add_modinfo_attrs(mod);
1923	if (err)
1924		goto out_unreg_param;
1925
1926	err = add_usage_links(mod);
1927	if (err)
1928		goto out_unreg_modinfo_attrs;
1929
1930	add_sect_attrs(mod, info);
1931	add_notes_attrs(mod, info);
1932
1933	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
1934	return 0;
1935
1936out_unreg_modinfo_attrs:
1937	module_remove_modinfo_attrs(mod, -1);
1938out_unreg_param:
1939	module_param_sysfs_remove(mod);
1940out_unreg_holders:
1941	kobject_put(mod->holders_dir);
1942out_unreg:
1943	mod_kobject_put(mod);
1944out:
1945	return err;
1946}
1947
1948static void mod_sysfs_fini(struct module *mod)
1949{
1950	remove_notes_attrs(mod);
1951	remove_sect_attrs(mod);
1952	mod_kobject_put(mod);
1953}
1954
1955static void init_param_lock(struct module *mod)
1956{
1957	mutex_init(&mod->param_lock);
1958}
1959#else /* !CONFIG_SYSFS */
1960
1961static int mod_sysfs_setup(struct module *mod,
1962			   const struct load_info *info,
1963			   struct kernel_param *kparam,
1964			   unsigned int num_params)
1965{
1966	return 0;
1967}
1968
1969static void mod_sysfs_fini(struct module *mod)
1970{
1971}
1972
1973static void module_remove_modinfo_attrs(struct module *mod, int end)
1974{
1975}
1976
1977static void del_usage_links(struct module *mod)
1978{
1979}
1980
1981static void init_param_lock(struct module *mod)
1982{
1983}
1984#endif /* CONFIG_SYSFS */
1985
1986static void mod_sysfs_teardown(struct module *mod)
1987{
1988	del_usage_links(mod);
1989	module_remove_modinfo_attrs(mod, -1);
1990	module_param_sysfs_remove(mod);
1991	kobject_put(mod->mkobj.drivers_dir);
1992	kobject_put(mod->holders_dir);
1993	mod_sysfs_fini(mod);
1994}
1995
1996/*
1997 * LKM RO/NX protection: protect module's text/ro-data
1998 * from modification and any data from execution.
1999 *
2000 * General layout of module is:
2001 *          [text] [read-only-data] [ro-after-init] [writable data]
2002 * text_size -----^                ^               ^               ^
2003 * ro_size ------------------------|               |               |
2004 * ro_after_init_size -----------------------------|               |
2005 * size -----------------------------------------------------------|
2006 *
2007 * These values are always page-aligned (as is base)
2008 */
2009
2010/*
2011 * Since some arches are moving towards PAGE_KERNEL module allocations instead
2012 * of PAGE_KERNEL_EXEC, keep frob_text() and module_enable_x() outside of the
2013 * CONFIG_STRICT_MODULE_RWX block below because they are needed regardless of
2014 * whether we are strict.
2015 */
2016#ifdef CONFIG_ARCH_HAS_STRICT_MODULE_RWX
2017static void frob_text(const struct module_layout *layout,
2018		      int (*set_memory)(unsigned long start, int num_pages))
2019{
2020	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2021	BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
2022	set_memory((unsigned long)layout->base,
2023		   layout->text_size >> PAGE_SHIFT);
2024}
2025
2026static void module_enable_x(const struct module *mod)
2027{
2028	frob_text(&mod->core_layout, set_memory_x);
2029	frob_text(&mod->init_layout, set_memory_x);
2030}
2031#else /* !CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2032static void module_enable_x(const struct module *mod) { }
2033#endif /* CONFIG_ARCH_HAS_STRICT_MODULE_RWX */
2034
2035#ifdef CONFIG_STRICT_MODULE_RWX
2036static void frob_rodata(const struct module_layout *layout,
2037			int (*set_memory)(unsigned long start, int num_pages))
2038{
2039	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2040	BUG_ON((unsigned long)layout->text_size & (PAGE_SIZE-1));
2041	BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
2042	set_memory((unsigned long)layout->base + layout->text_size,
2043		   (layout->ro_size - layout->text_size) >> PAGE_SHIFT);
2044}
2045
2046static void frob_ro_after_init(const struct module_layout *layout,
2047				int (*set_memory)(unsigned long start, int num_pages))
2048{
2049	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2050	BUG_ON((unsigned long)layout->ro_size & (PAGE_SIZE-1));
2051	BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
2052	set_memory((unsigned long)layout->base + layout->ro_size,
2053		   (layout->ro_after_init_size - layout->ro_size) >> PAGE_SHIFT);
2054}
2055
2056static void frob_writable_data(const struct module_layout *layout,
2057			       int (*set_memory)(unsigned long start, int num_pages))
2058{
2059	BUG_ON((unsigned long)layout->base & (PAGE_SIZE-1));
2060	BUG_ON((unsigned long)layout->ro_after_init_size & (PAGE_SIZE-1));
2061	BUG_ON((unsigned long)layout->size & (PAGE_SIZE-1));
2062	set_memory((unsigned long)layout->base + layout->ro_after_init_size,
2063		   (layout->size - layout->ro_after_init_size) >> PAGE_SHIFT);
2064}
2065
2066static void module_enable_ro(const struct module *mod, bool after_init)
2067{
2068	if (!rodata_enabled)
2069		return;
2070
2071	set_vm_flush_reset_perms(mod->core_layout.base);
2072	set_vm_flush_reset_perms(mod->init_layout.base);
2073	frob_text(&mod->core_layout, set_memory_ro);
2074
2075	frob_rodata(&mod->core_layout, set_memory_ro);
2076	frob_text(&mod->init_layout, set_memory_ro);
2077	frob_rodata(&mod->init_layout, set_memory_ro);
2078
2079	if (after_init)
2080		frob_ro_after_init(&mod->core_layout, set_memory_ro);
2081}
2082
2083static void module_enable_nx(const struct module *mod)
2084{
2085	frob_rodata(&mod->core_layout, set_memory_nx);
2086	frob_ro_after_init(&mod->core_layout, set_memory_nx);
2087	frob_writable_data(&mod->core_layout, set_memory_nx);
2088	frob_rodata(&mod->init_layout, set_memory_nx);
2089	frob_writable_data(&mod->init_layout, set_memory_nx);
2090}
2091
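/*
 * Basic W^X enforcement: refuse to load a module that carries any section
 * marked both writable and executable.
 */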
2092static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2093				       char *secstrings, struct module *mod)
2094{
2095	const unsigned long shf_wx = SHF_WRITE|SHF_EXECINSTR;
2096	int i;
2097
2098	for (i = 0; i < hdr->e_shnum; i++) {
2099		if ((sechdrs[i].sh_flags & shf_wx) == shf_wx)
2100			return -ENOEXEC;
2101	}
2102
2103	return 0;
2104}
2105
2106#else /* !CONFIG_STRICT_MODULE_RWX */
2107static void module_enable_nx(const struct module *mod) { }
2108static void module_enable_ro(const struct module *mod, bool after_init) {}
2109static int module_enforce_rwx_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
2110				       char *secstrings, struct module *mod)
2111{
2112	return 0;
2113}
2114#endif /*  CONFIG_STRICT_MODULE_RWX */
2115
2116#ifdef CONFIG_LIVEPATCH
2117/*
2118 * Persist Elf information about a module. Copy the Elf header,
2119 * section header table, section string table, and symtab section
2120 * index from info to mod->klp_info.
2121 */
2122static int copy_module_elf(struct module *mod, struct load_info *info)
2123{
2124	unsigned int size, symndx;
2125	int ret;
2126
2127	size = sizeof(*mod->klp_info);
2128	mod->klp_info = kmalloc(size, GFP_KERNEL);
2129	if (mod->klp_info == NULL)
2130		return -ENOMEM;
2131
2132	/* Elf header */
2133	size = sizeof(mod->klp_info->hdr);
2134	memcpy(&mod->klp_info->hdr, info->hdr, size);
2135
2136	/* Elf section header table */
2137	size = sizeof(*info->sechdrs) * info->hdr->e_shnum;
2138	mod->klp_info->sechdrs = kmemdup(info->sechdrs, size, GFP_KERNEL);
2139	if (mod->klp_info->sechdrs == NULL) {
2140		ret = -ENOMEM;
2141		goto free_info;
2142	}
2143
2144	/* Elf section name string table */
2145	size = info->sechdrs[info->hdr->e_shstrndx].sh_size;
2146	mod->klp_info->secstrings = kmemdup(info->secstrings, size, GFP_KERNEL);
2147	if (mod->klp_info->secstrings == NULL) {
2148		ret = -ENOMEM;
2149		goto free_sechdrs;
2150	}
2151
2152	/* Elf symbol section index */
2153	symndx = info->index.sym;
2154	mod->klp_info->symndx = symndx;
2155
2156	/*
2157	 * For livepatch modules, core_kallsyms.symtab is a complete
2158	 * copy of the original symbol table. Adjust sh_addr to point
2159	 * to core_kallsyms.symtab since the copy of the symtab in module
2160	 * init memory is freed at the end of do_init_module().
2161	 */
2162	mod->klp_info->sechdrs[symndx].sh_addr = \
2163		(unsigned long) mod->core_kallsyms.symtab;
2164
2165	return 0;
2166
2167free_sechdrs:
2168	kfree(mod->klp_info->sechdrs);
2169free_info:
2170	kfree(mod->klp_info);
2171	return ret;
2172}
2173
2174static void free_module_elf(struct module *mod)
2175{
2176	kfree(mod->klp_info->sechdrs);
2177	kfree(mod->klp_info->secstrings);
2178	kfree(mod->klp_info);
2179}
2180#else /* !CONFIG_LIVEPATCH */
2181static int copy_module_elf(struct module *mod, struct load_info *info)
2182{
2183	return 0;
2184}
2185
2186static void free_module_elf(struct module *mod)
2187{
2188}
2189#endif /* CONFIG_LIVEPATCH */
2190
2191void __weak module_memfree(void *module_region)
2192{
2193	/*
2194	 * This memory may be RO, and freeing RO memory in an interrupt is not
2195	 * supported by vmalloc.
2196	 */
2197	WARN_ON(in_interrupt());
2198	vfree(module_region);
2199}
2200
2201void __weak module_arch_cleanup(struct module *mod)
2202{
2203}
2204
2205void __weak module_arch_freeing_init(struct module *mod)
2206{
2207}
2208
2209/* Free a module, remove from lists, etc. */
2210static void free_module(struct module *mod)
2211{
2212	trace_module_free(mod);
2213
2214	mod_sysfs_teardown(mod);
2215
2216	/* We leave it in the list to prevent duplicate loads, but make sure
2217	 * that no one uses it while it's being deconstructed. */
2218	mutex_lock(&module_mutex);
2219	mod->state = MODULE_STATE_UNFORMED;
2220	mutex_unlock(&module_mutex);
2221
2222	/* Remove dynamic debug info */
2223	ddebug_remove_module(mod->name);
2224
2225	/* Arch-specific cleanup. */
2226	module_arch_cleanup(mod);
2227
2228	/* Module unload stuff */
2229	module_unload_free(mod);
2230
2231	/* Free any allocated parameters. */
2232	destroy_params(mod->kp, mod->num_kp);
2233
2234	if (is_livepatch_module(mod))
2235		free_module_elf(mod);
2236
2237	/* Now we can delete it from the lists */
2238	mutex_lock(&module_mutex);
2239	/* Unlink carefully: kallsyms could be walking the list. */
2240	list_del_rcu(&mod->list);
2241	mod_tree_remove(mod);
2242	/* Remove this module from bug list, this uses list_del_rcu */
2243	module_bug_cleanup(mod);
2244	/* Wait for RCU-sched synchronization before releasing mod->list and buglist. */
2245	synchronize_rcu();
2246	mutex_unlock(&module_mutex);
2247
2248	/* This may be empty, but that's OK */
2249	module_arch_freeing_init(mod);
2250	module_memfree(mod->init_layout.base);
2251	kfree(mod->args);
2252	percpu_modfree(mod);
2253
2254	/* Free lock-classes; relies on the preceding sync_rcu(). */
2255	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
2256
2257	/* Finally, free the core (containing the module structure) */
2258	module_memfree(mod->core_layout.base);
2259}
2260
2261void *__symbol_get(const char *symbol)
2262{
2263	struct module *owner;
2264	const struct kernel_symbol *sym;
2265
2266	preempt_disable();
2267	sym = find_symbol(symbol, &owner, NULL, NULL, true, true);
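	/*
	 * strong_try_module_get() returns 0 on success, so a non-zero
	 * result means the owner is going (or still coming) and the symbol
	 * must not be handed out.
	 */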
2268	if (sym && strong_try_module_get(owner))
2269		sym = NULL;
2270	preempt_enable();
2271
2272	return sym ? (void *)kernel_symbol_value(sym) : NULL;
2273}
2274EXPORT_SYMBOL_GPL(__symbol_get);
2275
2276/*
2277 * Ensure that an exported symbol [global namespace] does not already exist
2278 * in the kernel or in some other module's exported symbol table.
2279 *
2280 * You must hold the module_mutex.
2281 */
2282static int verify_exported_symbols(struct module *mod)
2283{
2284	unsigned int i;
2285	struct module *owner;
2286	const struct kernel_symbol *s;
2287	struct {
2288		const struct kernel_symbol *sym;
2289		unsigned int num;
2290	} arr[] = {
2291		{ mod->syms, mod->num_syms },
2292		{ mod->gpl_syms, mod->num_gpl_syms },
2293		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
2294#ifdef CONFIG_UNUSED_SYMBOLS
2295		{ mod->unused_syms, mod->num_unused_syms },
2296		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
2297#endif
2298	};
2299
2300	for (i = 0; i < ARRAY_SIZE(arr); i++) {
2301		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
2302			if (find_symbol(kernel_symbol_name(s), &owner, NULL,
2303					NULL, true, false)) {
2304				pr_err("%s: exports duplicate symbol %s"
2305				       " (owned by %s)\n",
2306				       mod->name, kernel_symbol_name(s),
2307				       module_name(owner));
2308				return -ENOEXEC;
2309			}
2310		}
2311	}
2312	return 0;
2313}
2314
2315/* Change all symbols so that st_value encodes the pointer directly. */
2316static int simplify_symbols(struct module *mod, const struct load_info *info)
2317{
2318	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2319	Elf_Sym *sym = (void *)symsec->sh_addr;
2320	unsigned long secbase;
2321	unsigned int i;
2322	int ret = 0;
2323	const struct kernel_symbol *ksym;
2324
2325	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
2326		const char *name = info->strtab + sym[i].st_name;
2327
2328		switch (sym[i].st_shndx) {
2329		case SHN_COMMON:
2330			/* Ignore common symbols */
2331			if (!strncmp(name, "__gnu_lto", 9))
2332				break;
2333
2334			/* We compiled with -fno-common.  These are not
2335			   supposed to happen.  */
2336			pr_debug("Common symbol: %s\n", name);
2337			pr_warn("%s: please compile with -fno-common\n",
2338			       mod->name);
2339			ret = -ENOEXEC;
2340			break;
2341
2342		case SHN_ABS:
2343			/* Don't need to do anything */
2344			pr_debug("Absolute symbol: 0x%08lx\n",
2345			       (long)sym[i].st_value);
2346			break;
2347
2348		case SHN_LIVEPATCH:
2349			/* Livepatch symbols are resolved by livepatch */
2350			break;
2351
2352		case SHN_UNDEF:
2353			ksym = resolve_symbol_wait(mod, info, name);
2354			/* Ok if resolved.  */
2355			if (ksym && !IS_ERR(ksym)) {
2356				sym[i].st_value = kernel_symbol_value(ksym);
2357				break;
2358			}
2359
2360			/* Ok if weak.  */
2361			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
2362				break;
2363
2364			ret = PTR_ERR(ksym) ?: -ENOENT;
2365			pr_warn("%s: Unknown symbol %s (err %d)\n",
2366				mod->name, name, ret);
2367			break;
2368
2369		default:
2370			/* Divert to percpu allocation if a percpu var. */
2371			if (sym[i].st_shndx == info->index.pcpu)
2372				secbase = (unsigned long)mod_percpu(mod);
2373			else
2374				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
2375			sym[i].st_value += secbase;
2376			break;
2377		}
2378	}
2379
2380	return ret;
2381}
2382
2383static int apply_relocations(struct module *mod, const struct load_info *info)
2384{
2385	unsigned int i;
2386	int err = 0;
2387
2388	/* Now do relocations. */
2389	for (i = 1; i < info->hdr->e_shnum; i++) {
2390		unsigned int infosec = info->sechdrs[i].sh_info;
2391
2392		/* Not a valid relocation section? */
2393		if (infosec >= info->hdr->e_shnum)
2394			continue;
2395
2396		/* Don't bother with non-allocated sections */
2397		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
2398			continue;
2399
2400		if (info->sechdrs[i].sh_flags & SHF_RELA_LIVEPATCH)
2401			err = klp_apply_section_relocs(mod, info->sechdrs,
2402						       info->secstrings,
2403						       info->strtab,
2404						       info->index.sym, i,
2405						       NULL);
2406		else if (info->sechdrs[i].sh_type == SHT_REL)
2407			err = apply_relocate(info->sechdrs, info->strtab,
2408					     info->index.sym, i, mod);
2409		else if (info->sechdrs[i].sh_type == SHT_RELA)
2410			err = apply_relocate_add(info->sechdrs, info->strtab,
2411						 info->index.sym, i, mod);
2412		if (err < 0)
2413			break;
2414	}
2415	return err;
2416}
2417
2418/* Additional bytes needed by arch in front of individual sections */
2419unsigned int __weak arch_mod_section_prepend(struct module *mod,
2420					     unsigned int section)
2421{
2422	/* default implementation just returns zero */
2423	return 0;
2424}
2425
2426/* Update size with this section: return offset. */
2427static long get_offset(struct module *mod, unsigned int *size,
2428		       Elf_Shdr *sechdr, unsigned int section)
2429{
2430	long ret;
2431
2432	*size += arch_mod_section_prepend(mod, section);
2433	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
2434	*size = ret + sechdr->sh_size;
2435	return ret;
2436}
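/*
 * Worked example: with *size == 100, sh_addralign == 64 and sh_size == 40
 * (and no arch prepend), the section is placed at offset 128 and *size
 * becomes 168.
 */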
2437
2438/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2439   might -- code, read-only data, read-write data, small data.  Tally
2440   sizes, and place the offsets into sh_entsize fields: high bit means it
2441   belongs in init. */
2442static void layout_sections(struct module *mod, struct load_info *info)
2443{
2444	static unsigned long const masks[][2] = {
2445		/* NOTE: all executable code must be the first section
2446		 * in this array; otherwise modify the text_size
2447		 * finder in the two loops below */
2448		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2449		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2450		{ SHF_RO_AFTER_INIT | SHF_ALLOC, ARCH_SHF_SMALL },
2451		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2452		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2453	};
2454	unsigned int m, i;
2455
2456	for (i = 0; i < info->hdr->e_shnum; i++)
2457		info->sechdrs[i].sh_entsize = ~0UL;
2458
2459	pr_debug("Core section allocation order:\n");
2460	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2461		for (i = 0; i < info->hdr->e_shnum; ++i) {
2462			Elf_Shdr *s = &info->sechdrs[i];
2463			const char *sname = info->secstrings + s->sh_name;
2464
2465			if ((s->sh_flags & masks[m][0]) != masks[m][0]
2466			    || (s->sh_flags & masks[m][1])
2467			    || s->sh_entsize != ~0UL
2468			    || module_init_section(sname))
2469				continue;
2470			s->sh_entsize = get_offset(mod, &mod->core_layout.size, s, i);
2471			pr_debug("\t%s\n", sname);
2472		}
2473		switch (m) {
2474		case 0: /* executable */
2475			mod->core_layout.size = debug_align(mod->core_layout.size);
2476			mod->core_layout.text_size = mod->core_layout.size;
2477			break;
2478		case 1: /* RO: text and ro-data */
2479			mod->core_layout.size = debug_align(mod->core_layout.size);
2480			mod->core_layout.ro_size = mod->core_layout.size;
2481			break;
2482		case 2: /* RO after init */
2483			mod->core_layout.size = debug_align(mod->core_layout.size);
2484			mod->core_layout.ro_after_init_size = mod->core_layout.size;
2485			break;
2486		case 4: /* whole core */
2487			mod->core_layout.size = debug_align(mod->core_layout.size);
2488			break;
2489		}
2490	}
2491
2492	pr_debug("Init section allocation order:\n");
2493	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2494		for (i = 0; i < info->hdr->e_shnum; ++i) {
2495			Elf_Shdr *s = &info->sechdrs[i];
2496			const char *sname = info->secstrings + s->sh_name;
2497
2498			if ((s->sh_flags & masks[m][0]) != masks[m][0]
2499			    || (s->sh_flags & masks[m][1])
2500			    || s->sh_entsize != ~0UL
2501			    || !module_init_section(sname))
2502				continue;
2503			s->sh_entsize = (get_offset(mod, &mod->init_layout.size, s, i)
2504					 | INIT_OFFSET_MASK);
2505			pr_debug("\t%s\n", sname);
2506		}
2507		switch (m) {
2508		case 0: /* executable */
2509			mod->init_layout.size = debug_align(mod->init_layout.size);
2510			mod->init_layout.text_size = mod->init_layout.size;
2511			break;
2512		case 1: /* RO: text and ro-data */
2513			mod->init_layout.size = debug_align(mod->init_layout.size);
2514			mod->init_layout.ro_size = mod->init_layout.size;
2515			break;
2516		case 2:
2517			/*
2518			 * RO after init doesn't apply to init_layout (only
2519			 * core_layout), so it just takes the value of ro_size.
2520			 */
2521			mod->init_layout.ro_after_init_size = mod->init_layout.ro_size;
2522			break;
2523		case 4: /* whole init */
2524			mod->init_layout.size = debug_align(mod->init_layout.size);
2525			break;
2526		}
2527	}
2528}
2529
2530static void set_license(struct module *mod, const char *license)
2531{
2532	if (!license)
2533		license = "unspecified";
2534
2535	if (!license_is_gpl_compatible(license)) {
2536		if (!test_taint(TAINT_PROPRIETARY_MODULE))
2537			pr_warn("%s: module license '%s' taints kernel.\n",
2538				mod->name, license);
2539		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2540				 LOCKDEP_NOW_UNRELIABLE);
2541	}
2542}
2543
2544/* Parse tag=value strings from .modinfo section */
2545static char *next_string(char *string, unsigned long *secsize)
2546{
2547	/* Skip non-zero chars */
2548	while (string[0]) {
2549		string++;
2550		if ((*secsize)-- <= 1)
2551			return NULL;
2552	}
2553
2554	/* Skip any zero padding. */
2555	while (!string[0]) {
2556		string++;
2557		if ((*secsize)-- <= 1)
2558			return NULL;
2559	}
2560	return string;
2561}
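/*
 * .modinfo is a sequence of NUL-terminated "tag=value" strings, e.g.
 * "license=GPL\0vermagic=...\0"; next_string() steps from one entry to the
 * next without running past the section size.
 */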
2562
2563static char *get_next_modinfo(const struct load_info *info, const char *tag,
2564			      char *prev)
2565{
2566	char *p;
2567	unsigned int taglen = strlen(tag);
2568	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2569	unsigned long size = infosec->sh_size;
2570
2571	/*
2572	 * get_modinfo() calls made before rewrite_section_headers()
2573	 * must use sh_offset, as sh_addr isn't set!
2574	 */
2575	char *modinfo = (char *)info->hdr + infosec->sh_offset;
2576
2577	if (prev) {
2578		size -= prev - modinfo;
2579		modinfo = next_string(prev, &size);
2580	}
2581
2582	for (p = modinfo; p; p = next_string(p, &size)) {
2583		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2584			return p + taglen + 1;
2585	}
2586	return NULL;
2587}
2588
2589static char *get_modinfo(const struct load_info *info, const char *tag)
2590{
2591	return get_next_modinfo(info, tag, NULL);
2592}
2593
2594static void setup_modinfo(struct module *mod, struct load_info *info)
2595{
2596	struct module_attribute *attr;
2597	int i;
2598
2599	for (i = 0; (attr = modinfo_attrs[i]); i++) {
2600		if (attr->setup)
2601			attr->setup(mod, get_modinfo(info, attr->attr.name));
2602	}
2603}
2604
2605static void free_modinfo(struct module *mod)
2606{
2607	struct module_attribute *attr;
2608	int i;
2609
2610	for (i = 0; (attr = modinfo_attrs[i]); i++) {
2611		if (attr->free)
2612			attr->free(mod);
2613	}
2614}
2615
2616#ifdef CONFIG_KALLSYMS
2617
2618/* Lookup exported symbol in given range of kernel_symbols */
2619static const struct kernel_symbol *lookup_exported_symbol(const char *name,
2620							  const struct kernel_symbol *start,
2621							  const struct kernel_symbol *stop)
2622{
2623	return bsearch(name, start, stop - start,
2624			sizeof(struct kernel_symbol), cmp_name);
2625}
2626
2627static int is_exported(const char *name, unsigned long value,
2628		       const struct module *mod)
2629{
2630	const struct kernel_symbol *ks;
2631	if (!mod)
2632		ks = lookup_exported_symbol(name, __start___ksymtab, __stop___ksymtab);
2633	else
2634		ks = lookup_exported_symbol(name, mod->syms, mod->syms + mod->num_syms);
2635
2636	return ks != NULL && kernel_symbol_value(ks) == value;
2637}
2638
2639/* As per nm */
2640static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2641{
2642	const Elf_Shdr *sechdrs = info->sechdrs;
2643
2644	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2645		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2646			return 'v';
2647		else
2648			return 'w';
2649	}
2650	if (sym->st_shndx == SHN_UNDEF)
2651		return 'U';
2652	if (sym->st_shndx == SHN_ABS || sym->st_shndx == info->index.pcpu)
2653		return 'a';
2654	if (sym->st_shndx >= SHN_LORESERVE)
2655		return '?';
2656	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2657		return 't';
2658	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2659	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2660		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2661			return 'r';
2662		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2663			return 'g';
2664		else
2665			return 'd';
2666	}
2667	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2668		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2669			return 's';
2670		else
2671			return 'b';
2672	}
2673	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2674		      ".debug")) {
2675		return 'n';
2676	}
2677	return '?';
2678}
2679
2680static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2681			unsigned int shnum, unsigned int pcpundx)
2682{
2683	const Elf_Shdr *sec;
2684
2685	if (src->st_shndx == SHN_UNDEF
2686	    || src->st_shndx >= shnum
2687	    || !src->st_name)
2688		return false;
2689
2690#ifdef CONFIG_KALLSYMS_ALL
2691	if (src->st_shndx == pcpundx)
2692		return true;
2693#endif
2694
2695	sec = sechdrs + src->st_shndx;
2696	if (!(sec->sh_flags & SHF_ALLOC)
2697#ifndef CONFIG_KALLSYMS_ALL
2698	    || !(sec->sh_flags & SHF_EXECINSTR)
2699#endif
2700	    || (sec->sh_entsize & INIT_OFFSET_MASK))
2701		return false;
2702
2703	return true;
2704}
2705
2706/*
2707 * We only allocate and copy the strings needed by the parts of symtab
2708 * we keep.  This is simple, but has the effect of making multiple
2709 * copies of duplicates.  We could be more sophisticated, see
2710 * linux-kernel thread starting with
2711 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2712 */
2713static void layout_symtab(struct module *mod, struct load_info *info)
2714{
2715	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2716	Elf_Shdr *strsect = info->sechdrs + info->index.str;
2717	const Elf_Sym *src;
2718	unsigned int i, nsrc, ndst, strtab_size = 0;
2719
2720	/* Put symbol section at end of init part of module. */
2721	symsect->sh_flags |= SHF_ALLOC;
2722	symsect->sh_entsize = get_offset(mod, &mod->init_layout.size, symsect,
2723					 info->index.sym) | INIT_OFFSET_MASK;
2724	pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2725
2726	src = (void *)info->hdr + symsect->sh_offset;
2727	nsrc = symsect->sh_size / sizeof(*src);
2728
2729	/* Compute total space required for the core symbols' strtab. */
2730	for (ndst = i = 0; i < nsrc; i++) {
2731		if (i == 0 || is_livepatch_module(mod) ||
2732		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2733				   info->index.pcpu)) {
2734			strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2735			ndst++;
2736		}
2737	}
2738
2739	/* Append room for core symbols at end of core part. */
2740	info->symoffs = ALIGN(mod->core_layout.size, symsect->sh_addralign ?: 1);
2741	info->stroffs = mod->core_layout.size = info->symoffs + ndst * sizeof(Elf_Sym);
2742	mod->core_layout.size += strtab_size;
2743	info->core_typeoffs = mod->core_layout.size;
2744	mod->core_layout.size += ndst * sizeof(char);
2745	mod->core_layout.size = debug_align(mod->core_layout.size);
2746
2747	/* Put string table section at end of init part of module. */
2748	strsect->sh_flags |= SHF_ALLOC;
2749	strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
2750					 info->index.str) | INIT_OFFSET_MASK;
2751	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2752
2753	/* We'll tack temporary mod_kallsyms on the end. */
2754	mod->init_layout.size = ALIGN(mod->init_layout.size,
2755				      __alignof__(struct mod_kallsyms));
2756	info->mod_kallsyms_init_off = mod->init_layout.size;
2757	mod->init_layout.size += sizeof(struct mod_kallsyms);
2758	info->init_typeoffs = mod->init_layout.size;
2759	mod->init_layout.size += nsrc * sizeof(char);
2760	mod->init_layout.size = debug_align(mod->init_layout.size);
2761}
2762
2763/*
2764 * We use the full symtab and strtab which layout_symtab arranged to
2765 * be appended to the init section.  Later we switch to the cut-down
2766 * core-only ones.
2767 */
2768static void add_kallsyms(struct module *mod, const struct load_info *info)
2769{
2770	unsigned int i, ndst;
2771	const Elf_Sym *src;
2772	Elf_Sym *dst;
2773	char *s;
2774	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2775
2776	/* Set up to point into init section. */
2777	mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
2778
2779	mod->kallsyms->symtab = (void *)symsec->sh_addr;
2780	mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2781	/* Make sure we get permanent strtab: don't use info->strtab. */
2782	mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2783	mod->kallsyms->typetab = mod->init_layout.base + info->init_typeoffs;
2784
2785	/*
2786	 * Now populate the cut down core kallsyms for after init
2787	 * and set types up while we still have access to sections.
2788	 */
2789	mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
2790	mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
2791	mod->core_kallsyms.typetab = mod->core_layout.base + info->core_typeoffs;
2792	src = mod->kallsyms->symtab;
2793	for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
2794		mod->kallsyms->typetab[i] = elf_type(src + i, info);
2795		if (i == 0 || is_livepatch_module(mod) ||
2796		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
2797				   info->index.pcpu)) {
2798			mod->core_kallsyms.typetab[ndst] =
2799			    mod->kallsyms->typetab[i];
2800			dst[ndst] = src[i];
2801			dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
2802			s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
2803				     KSYM_NAME_LEN) + 1;
2804		}
2805	}
2806	mod->core_kallsyms.num_symtab = ndst;
2807}
2808#else
2809static inline void layout_symtab(struct module *mod, struct load_info *info)
2810{
2811}
2812
2813static void add_kallsyms(struct module *mod, const struct load_info *info)
2814{
2815}
2816#endif /* CONFIG_KALLSYMS */
2817
2818static void dynamic_debug_setup(struct module *mod, struct _ddebug *debug, unsigned int num)
2819{
2820	if (!debug)
2821		return;
2822	ddebug_add_module(debug, num, mod->name);
2823}
2824
2825static void dynamic_debug_remove(struct module *mod, struct _ddebug *debug)
2826{
2827	if (debug)
2828		ddebug_remove_module(mod->name);
2829}
2830
2831void * __weak module_alloc(unsigned long size)
2832{
2833	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2834			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2835			NUMA_NO_NODE, __builtin_return_address(0));
2836}
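/*
 * Architectures typically override this weak default so that module text
 * is placed within relative-branch range of the core kernel (a dedicated
 * module area rather than the generic vmalloc range).
 */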
2837
2838bool __weak module_init_section(const char *name)
2839{
2840	return strstarts(name, ".init");
2841}
2842
2843bool __weak module_exit_section(const char *name)
2844{
2845	return strstarts(name, ".exit");
2846}
2847
2848#ifdef CONFIG_DEBUG_KMEMLEAK
2849static void kmemleak_load_module(const struct module *mod,
2850				 const struct load_info *info)
2851{
2852	unsigned int i;
2853
2854	/* only scan the sections containing data */
2855	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2856
2857	for (i = 1; i < info->hdr->e_shnum; i++) {
2858		/* Scan all writable sections that are not executable */
2859		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2860		    !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2861		    (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2862			continue;
2863
2864		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2865				   info->sechdrs[i].sh_size, GFP_KERNEL);
2866	}
2867}
2868#else
2869static inline void kmemleak_load_module(const struct module *mod,
2870					const struct load_info *info)
2871{
2872}
2873#endif
2874
2875#ifdef CONFIG_MODULE_SIG
2876static int module_sig_check(struct load_info *info, int flags)
2877{
2878	int err = -ENODATA;
2879	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2880	const char *reason;
2881	const void *mod = info->hdr;
2882
2883	/*
2884	 * Require flags == 0, as a module with version information
2885	 * removed is no longer the module that was signed
2886	 */
2887	if (flags == 0 &&
2888	    info->len > markerlen &&
2889	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2890		/* We truncate the module to discard the signature */
2891		info->len -= markerlen;
2892		err = mod_verify_sig(mod, info);
2893	}
2894
2895	switch (err) {
2896	case 0:
2897		info->sig_ok = true;
2898		return 0;
2899
2900		/* We don't permit modules to be loaded into trusted kernels
2901		 * without a valid signature on them, but if we're not
2902		 * enforcing, certain errors are non-fatal.
2903		 */
2904	case -ENODATA:
2905		reason = "Loading of unsigned module";
2906		goto decide;
2907	case -ENOPKG:
2908		reason = "Loading of module with unsupported crypto";
2909		goto decide;
2910	case -ENOKEY:
2911		reason = "Loading of module with unavailable key";
2912	decide:
2913		if (is_module_sig_enforced()) {
2914			pr_notice("%s: %s is rejected\n", info->name, reason);
2915			return -EKEYREJECTED;
2916		}
2917
2918		return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
2919
2920		/* All other errors are fatal, including nomem, unparseable
2921		 * signatures and signature check failures - even if signatures
2922		 * aren't required.
2923		 */
2924	default:
2925		return err;
2926	}
2927}
2928#else /* !CONFIG_MODULE_SIG */
2929static int module_sig_check(struct load_info *info, int flags)
2930{
2931	return 0;
2932}
2933#endif /* !CONFIG_MODULE_SIG */
2934
2935/* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2936static int elf_header_check(struct load_info *info)
2937{
2938	if (info->len < sizeof(*(info->hdr)))
2939		return -ENOEXEC;
2940
2941	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2942	    || info->hdr->e_type != ET_REL
2943	    || !elf_check_arch(info->hdr)
2944	    || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2945		return -ENOEXEC;
2946
2947	if (info->hdr->e_shoff >= info->len
2948	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2949		info->len - info->hdr->e_shoff))
2950		return -ENOEXEC;
2951
2952	return 0;
2953}
2954
2955#define COPY_CHUNK_SIZE (16*PAGE_SIZE)
2956
2957static int copy_chunked_from_user(void *dst, const void __user *usrc, unsigned long len)
2958{
2959	do {
2960		unsigned long n = min(len, COPY_CHUNK_SIZE);
2961
2962		if (copy_from_user(dst, usrc, n) != 0)
2963			return -EFAULT;
2964		cond_resched();
2965		dst += n;
2966		usrc += n;
2967		len -= n;
2968	} while (len);
2969	return 0;
2970}
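/*
 * Copying in 16-page chunks with cond_resched() in between keeps a large
 * module image from monopolizing the CPU for the whole copy from
 * userspace.
 */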
2971
2972#ifdef CONFIG_LIVEPATCH
2973static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2974{
2975	if (get_modinfo(info, "livepatch")) {
2976		mod->klp = true;
2977		add_taint_module(mod, TAINT_LIVEPATCH, LOCKDEP_STILL_OK);
2978		pr_notice_once("%s: tainting kernel with TAINT_LIVEPATCH\n",
2979			       mod->name);
2980	}
2981
2982	return 0;
2983}
2984#else /* !CONFIG_LIVEPATCH */
2985static int check_modinfo_livepatch(struct module *mod, struct load_info *info)
2986{
2987	if (get_modinfo(info, "livepatch")) {
2988		pr_err("%s: module is marked as livepatch module, but livepatch support is disabled",
2989		       mod->name);
2990		return -ENOEXEC;
2991	}
2992
2993	return 0;
2994}
2995#endif /* CONFIG_LIVEPATCH */
2996
2997static void check_modinfo_retpoline(struct module *mod, struct load_info *info)
2998{
2999	if (retpoline_module_ok(get_modinfo(info, "retpoline")))
3000		return;
3001
3002	pr_warn("%s: loading module not compiled with retpoline compiler.\n",
3003		mod->name);
3004}
3005
3006/* Sets info->hdr and info->len. */
3007static int copy_module_from_user(const void __user *umod, unsigned long len,
3008				  struct load_info *info)
3009{
3010	int err;
3011
3012	info->len = len;
3013	if (info->len < sizeof(*(info->hdr)))
3014		return -ENOEXEC;
3015
3016	err = security_kernel_load_data(LOADING_MODULE);
3017	if (err)
3018		return err;
3019
3020	/* Suck in entire file: we'll want most of it. */
3021	info->hdr = __vmalloc(info->len, GFP_KERNEL | __GFP_NOWARN);
3022	if (!info->hdr)
3023		return -ENOMEM;
3024
3025	if (copy_chunked_from_user(info->hdr, umod, info->len) != 0) {
3026		vfree(info->hdr);
3027		return -EFAULT;
3028	}
3029
3030	return 0;
3031}
3032
3033static void free_copy(struct load_info *info)
3034{
3035	vfree(info->hdr);
3036}
3037
3038static int rewrite_section_headers(struct load_info *info, int flags)
3039{
3040	unsigned int i;
3041
3042	/* This should always be true, but let's be sure. */
3043	info->sechdrs[0].sh_addr = 0;
3044
3045	for (i = 1; i < info->hdr->e_shnum; i++) {
3046		Elf_Shdr *shdr = &info->sechdrs[i];
3047		if (shdr->sh_type != SHT_NOBITS
3048		    && info->len < shdr->sh_offset + shdr->sh_size) {
3049			pr_err("Module len %lu truncated\n", info->len);
3050			return -ENOEXEC;
3051		}
3052
3053		/* Set each section's sh_addr to its address in the
3054		   temporary image. */
3055		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
3056
3057#ifndef CONFIG_MODULE_UNLOAD
3058		/* Don't load .exit sections */
3059		if (module_exit_section(info->secstrings+shdr->sh_name))
3060			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
3061#endif
3062	}
3063
3064	/* Track but don't keep modinfo and version sections. */
3065	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
3066	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
3067
3068	return 0;
3069}
3070
3071/*
3072 * Set up our basic convenience variables (pointers to section headers,
3073 * search for module section index etc), and do some basic section
3074 * verification.
3075 *
3076 * Set info->mod to the temporary copy of the module in info->hdr. The final one
3077 * will be allocated in move_module().
3078 */
3079static int setup_load_info(struct load_info *info, int flags)
3080{
3081	unsigned int i;
3082
3083	/* Set up the convenience variables */
3084	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
3085	info->secstrings = (void *)info->hdr
3086		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;
3087
3088	/* Try to find a name early so we can log errors with a module name */
3089	info->index.info = find_sec(info, ".modinfo");
3090	if (info->index.info)
3091		info->name = get_modinfo(info, "name");
3092
3093	/* Find internal symbols and strings. */
3094	for (i = 1; i < info->hdr->e_shnum; i++) {
3095		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
3096			info->index.sym = i;
3097			info->index.str = info->sechdrs[i].sh_link;
3098			info->strtab = (char *)info->hdr
3099				+ info->sechdrs[info->index.str].sh_offset;
3100			break;
3101		}
3102	}
3103
3104	if (info->index.sym == 0) {
3105		pr_warn("%s: module has no symbols (stripped?)\n",
3106			info->name ?: "(missing .modinfo section or name field)");
3107		return -ENOEXEC;
3108	}
3109
3110	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
3111	if (!info->index.mod) {
3112		pr_warn("%s: No module found in object\n",
3113			info->name ?: "(missing .modinfo section or name field)");
3114		return -ENOEXEC;
3115	}
3116	/* This is temporary: point mod into copy of data. */
3117	info->mod = (void *)info->hdr + info->sechdrs[info->index.mod].sh_offset;
3118
3119	/*
3120	 * If we didn't load the .modinfo 'name' field earlier, fall back to
3121	 * on-disk struct mod 'name' field.
3122	 */
3123	if (!info->name)
3124		info->name = info->mod->name;
3125
3126	if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
3127		info->index.vers = 0; /* Pretend no __versions section! */
3128	else
3129		info->index.vers = find_sec(info, "__versions");
3130
3131	info->index.pcpu = find_pcpusec(info);
3132
3133	return 0;
3134}
3135
3136static int check_modinfo(struct module *mod, struct load_info *info, int flags)
3137{
3138	const char *modmagic = get_modinfo(info, "vermagic");
3139	int err;
3140
3141	if (flags & MODULE_INIT_IGNORE_VERMAGIC)
3142		modmagic = NULL;
3143
3144	/* This is allowed: modprobe --force will invalidate it. */
3145	if (!modmagic) {
3146		err = try_to_force_load(mod, "bad vermagic");
3147		if (err)
3148			return err;
3149	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
3150		pr_err("%s: version magic '%s' should be '%s'\n",
3151		       info->name, modmagic, vermagic);
3152		return -ENOEXEC;
3153	}
3154
3155	if (!get_modinfo(info, "intree")) {
3156		if (!test_taint(TAINT_OOT_MODULE))
3157			pr_warn("%s: loading out-of-tree module taints kernel.\n",
3158				mod->name);
3159		add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
3160	}
3161
3162	check_modinfo_retpoline(mod, info);
3163
3164	if (get_modinfo(info, "staging")) {
3165		add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
3166		pr_warn("%s: module is from the staging directory, the quality "
3167			"is unknown, you have been warned.\n", mod->name);
3168	}
3169
3170	err = check_modinfo_livepatch(mod, info);
3171	if (err)
3172		return err;
3173
3174	/* Set up license info based on the info section */
3175	set_license(mod, get_modinfo(info, "license"));
3176
3177	return 0;
3178}
3179
3180static int find_module_sections(struct module *mod, struct load_info *info)
3181{
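	/*
	 * Each section_objs()/section_addr() lookup below resolves a named
	 * ELF section to its address in the image (plus an element count for
	 * section_objs()).  Since section 0's size is always zero, a missing
	 * section simply produces a count of zero, which is what makes all
	 * of these sections optional.
	 */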
3182	mod->kp = section_objs(info, "__param",
3183			       sizeof(*mod->kp), &mod->num_kp);
3184	mod->syms = section_objs(info, "__ksymtab",
3185				 sizeof(*mod->syms), &mod->num_syms);
3186	mod->crcs = section_addr(info, "__kcrctab");
3187	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
3188				     sizeof(*mod->gpl_syms),
3189				     &mod->num_gpl_syms);
3190	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
3191	mod->gpl_future_syms = section_objs(info,
3192					    "__ksymtab_gpl_future",
3193					    sizeof(*mod->gpl_future_syms),
3194					    &mod->num_gpl_future_syms);
3195	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
3196
3197#ifdef CONFIG_UNUSED_SYMBOLS
3198	mod->unused_syms = section_objs(info, "__ksymtab_unused",
3199					sizeof(*mod->unused_syms),
3200					&mod->num_unused_syms);
3201	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
3202	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
3203					    sizeof(*mod->unused_gpl_syms),
3204					    &mod->num_unused_gpl_syms);
3205	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
3206#endif
3207#ifdef CONFIG_CONSTRUCTORS
3208	mod->ctors = section_objs(info, ".ctors",
3209				  sizeof(*mod->ctors), &mod->num_ctors);
3210	if (!mod->ctors)
3211		mod->ctors = section_objs(info, ".init_array",
3212				sizeof(*mod->ctors), &mod->num_ctors);
3213	else if (find_sec(info, ".init_array")) {
3214		/*
3215		 * This shouldn't happen when the same compiler and binutils
3216		 * are used to build all parts of the module.
3217		 */
3218		pr_warn("%s: has both .ctors and .init_array.\n",
3219		       mod->name);
3220		return -EINVAL;
3221	}
3222#endif
3223
3224	mod->noinstr_text_start = section_objs(info, ".noinstr.text", 1,
3225						&mod->noinstr_text_size);
3226
3227#ifdef CONFIG_TRACEPOINTS
3228	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
3229					     sizeof(*mod->tracepoints_ptrs),
3230					     &mod->num_tracepoints);
3231#endif
3232#ifdef CONFIG_TREE_SRCU
3233	mod->srcu_struct_ptrs = section_objs(info, "___srcu_struct_ptrs",
3234					     sizeof(*mod->srcu_struct_ptrs),
3235					     &mod->num_srcu_structs);
3236#endif
3237#ifdef CONFIG_BPF_EVENTS
3238	mod->bpf_raw_events = section_objs(info, "__bpf_raw_tp_map",
3239					   sizeof(*mod->bpf_raw_events),
3240					   &mod->num_bpf_raw_events);
3241#endif
3242#ifdef CONFIG_JUMP_LABEL
3243	mod->jump_entries = section_objs(info, "__jump_table",
3244					sizeof(*mod->jump_entries),
3245					&mod->num_jump_entries);
3246#endif
3247#ifdef CONFIG_EVENT_TRACING
3248	mod->trace_events = section_objs(info, "_ftrace_events",
3249					 sizeof(*mod->trace_events),
3250					 &mod->num_trace_events);
3251	mod->trace_evals = section_objs(info, "_ftrace_eval_map",
3252					sizeof(*mod->trace_evals),
3253					&mod->num_trace_evals);
3254#endif
3255#ifdef CONFIG_TRACING
3256	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
3257					 sizeof(*mod->trace_bprintk_fmt_start),
3258					 &mod->num_trace_bprintk_fmt);
3259#endif
3260#ifdef CONFIG_FTRACE_MCOUNT_RECORD
3261	/* sechdrs[0].sh_size is always zero */
3262	mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
3263					     sizeof(*mod->ftrace_callsites),
3264					     &mod->num_ftrace_callsites);
3265#endif
3266#ifdef CONFIG_FUNCTION_ERROR_INJECTION
3267	mod->ei_funcs = section_objs(info, "_error_injection_whitelist",
3268					    sizeof(*mod->ei_funcs),
3269					    &mod->num_ei_funcs);
3270#endif
3271#ifdef CONFIG_KPROBES
3272	mod->kprobes_text_start = section_objs(info, ".kprobes.text", 1,
3273						&mod->kprobes_text_size);
3274	mod->kprobe_blacklist = section_objs(info, "_kprobe_blacklist",
3275						sizeof(unsigned long),
3276						&mod->num_kprobe_blacklist);
3277#endif
3278	mod->extable = section_objs(info, "__ex_table",
3279				    sizeof(*mod->extable), &mod->num_exentries);
3280
3281	if (section_addr(info, "__obsparm"))
3282		pr_warn("%s: Ignoring obsolete parameters\n", mod->name);
3283
3284	info->debug = section_objs(info, "__dyndbg",
3285				   sizeof(*info->debug), &info->num_debug);
3286
3287	return 0;
3288}
3289
3290static int move_module(struct module *mod, struct load_info *info)
3291{
3292	int i;
3293	void *ptr;
3294
3295	/* Do the allocs. */
3296	ptr = module_alloc(mod->core_layout.size);
3297	/*
3298	 * The pointer to this block is stored in the module structure
3299	 * which is inside the block. Just mark it as not being a
3300	 * leak.
3301	 */
3302	kmemleak_not_leak(ptr);
3303	if (!ptr)
3304		return -ENOMEM;
3305
3306	memset(ptr, 0, mod->core_layout.size);
3307	mod->core_layout.base = ptr;
3308
3309	if (mod->init_layout.size) {
3310		ptr = module_alloc(mod->init_layout.size);
3311		/*
3312		 * The pointer to this block is stored in the module structure
3313		 * which is inside the block. This block doesn't need to be
3314		 * scanned as it contains data and code that will be freed
3315		 * after the module is initialized.
3316		 */
3317		kmemleak_ignore(ptr);
3318		if (!ptr) {
3319			module_memfree(mod->core_layout.base);
3320			return -ENOMEM;
3321		}
3322		memset(ptr, 0, mod->init_layout.size);
3323		mod->init_layout.base = ptr;
3324	} else
3325		mod->init_layout.base = NULL;
3326
3327	/* Transfer each section which specifies SHF_ALLOC */
3328	pr_debug("final section addresses:\n");
3329	for (i = 0; i < info->hdr->e_shnum; i++) {
3330		void *dest;
3331		Elf_Shdr *shdr = &info->sechdrs[i];
3332
3333		if (!(shdr->sh_flags & SHF_ALLOC))
3334			continue;
3335
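		/*
		 * layout_sections() stored each allocatable section's
		 * destination offset in sh_entsize, setting INIT_OFFSET_MASK
		 * for sections that belong to the init region.
		 */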
3336		if (shdr->sh_entsize & INIT_OFFSET_MASK)
3337			dest = mod->init_layout.base
3338				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
3339		else
3340			dest = mod->core_layout.base + shdr->sh_entsize;
3341
3342		if (shdr->sh_type != SHT_NOBITS)
3343			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
3344		/* Update sh_addr to point to copy in image. */
3345		shdr->sh_addr = (unsigned long)dest;
3346		pr_debug("\t0x%lx %s\n",
3347			 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
3348	}
3349
3350	return 0;
3351}
3352
3353static int check_module_license_and_versions(struct module *mod)
3354{
3355	int prev_taint = test_taint(TAINT_PROPRIETARY_MODULE);
3356
3357	/*
3358	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
3359	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
3360	 * using GPL-only symbols it needs.
3361	 */
3362	if (strcmp(mod->name, "ndiswrapper") == 0)
3363		add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
3364
3365	/* driverloader was caught wrongly pretending to be under GPL */
3366	if (strcmp(mod->name, "driverloader") == 0)
3367		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3368				 LOCKDEP_NOW_UNRELIABLE);
3369
3370	/* lve claims to be GPL but upstream won't provide source */
3371	if (strcmp(mod->name, "lve") == 0)
3372		add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
3373				 LOCKDEP_NOW_UNRELIABLE);
3374
3375	if (!prev_taint && test_taint(TAINT_PROPRIETARY_MODULE))
3376		pr_warn("%s: module license taints kernel.\n", mod->name);
3377
3378#ifdef CONFIG_MODVERSIONS
3379	if ((mod->num_syms && !mod->crcs)
3380	    || (mod->num_gpl_syms && !mod->gpl_crcs)
3381	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
3382#ifdef CONFIG_UNUSED_SYMBOLS
3383	    || (mod->num_unused_syms && !mod->unused_crcs)
3384	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
3385#endif
3386		) {
3387		return try_to_force_load(mod,
3388					 "no versions for exported symbols");
3389	}
3390#endif
3391	return 0;
3392}
3393
3394static void flush_module_icache(const struct module *mod)
3395{
3396	/*
3397	 * Flush the instruction cache, since we've played with text.
3398	 * Do it before processing of module parameters, so the module
3399	 * can provide parameter accessor functions of its own.
3400	 */
3401	if (mod->init_layout.base)
3402		flush_icache_range((unsigned long)mod->init_layout.base,
3403				   (unsigned long)mod->init_layout.base
3404				   + mod->init_layout.size);
3405	flush_icache_range((unsigned long)mod->core_layout.base,
3406			   (unsigned long)mod->core_layout.base + mod->core_layout.size);
3407}
3408
3409int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
3410				     Elf_Shdr *sechdrs,
3411				     char *secstrings,
3412				     struct module *mod)
3413{
3414	return 0;
3415}
3416
3417/* module_blacklist is a comma-separated list of module names */
3418static char *module_blacklist;
3419static bool blacklisted(const char *module_name)
3420{
3421	const char *p;
3422	size_t len;
3423
3424	if (!module_blacklist)
3425		return false;
3426
3427	for (p = module_blacklist; *p; p += len) {
3428		len = strcspn(p, ",");
3429		if (strlen(module_name) == len && !memcmp(module_name, p, len))
3430			return true;
3431		if (p[len] == ',')
3432			len++;
3433	}
3434	return false;
3435}
3436core_param(module_blacklist, module_blacklist, charp, 0400);
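/*
 * Example: booting with "module_blacklist=nouveau,floppy" makes load_module()
 * reject exactly those two modules with -EPERM.  The match is on the whole
 * name, so "floppy" does not also blacklist "floppy2".  (The module names
 * above are purely illustrative.)
 */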
3437
3438static struct module *layout_and_allocate(struct load_info *info, int flags)
3439{
 
3440	struct module *mod;
3441	unsigned int ndx;
3442	int err;
3443
3444	err = check_modinfo(info->mod, info, flags);
3445	if (err)
3446		return ERR_PTR(err);
3447
3448	/* Allow arches to frob section contents and sizes.  */
3449	err = module_frob_arch_sections(info->hdr, info->sechdrs,
3450					info->secstrings, info->mod);
3451	if (err < 0)
3452		return ERR_PTR(err);
3453
3454	err = module_enforce_rwx_sections(info->hdr, info->sechdrs,
3455					  info->secstrings, info->mod);
3456	if (err < 0)
3457		return ERR_PTR(err);
3458
3459	/* We will do a special allocation for per-cpu sections later. */
3460	info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
3461
3462	/*
3463	 * Mark ro_after_init section with SHF_RO_AFTER_INIT so that
3464	 * layout_sections() can put it in the right place.
3465	 * Note: ro_after_init sections also have SHF_{WRITE,ALLOC} set.
3466	 */
3467	ndx = find_sec(info, ".data..ro_after_init");
3468	if (ndx)
3469		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3470	/*
3471	 * Mark the __jump_table section as ro_after_init as well: these data
3472	 * structures are never modified, with the exception of entries that
3473	 * refer to code in the __init section, which are annotated as such
3474	 * at module load time.
3475	 */
3476	ndx = find_sec(info, "__jump_table");
3477	if (ndx)
3478		info->sechdrs[ndx].sh_flags |= SHF_RO_AFTER_INIT;
3479
3480	/* Determine total sizes, and put offsets in sh_entsize.  For now
3481	   this is done generically; there don't appear to be any special
3482	   cases for the architectures. */
3483	layout_sections(info->mod, info);
3484	layout_symtab(info->mod, info);
3485
3486	/* Allocate and move to the final place */
3487	err = move_module(info->mod, info);
3488	if (err)
3489		return ERR_PTR(err);
3490
3491	/* Module has been copied to its final place now: return it. */
3492	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
3493	kmemleak_load_module(mod, info);
3494	return mod;
3495}
3496
3497/* mod is no longer valid after this! */
3498static void module_deallocate(struct module *mod, struct load_info *info)
3499{
3500	percpu_modfree(mod);
3501	module_arch_freeing_init(mod);
3502	module_memfree(mod->init_layout.base);
3503	module_memfree(mod->core_layout.base);
3504}
3505
3506int __weak module_finalize(const Elf_Ehdr *hdr,
3507			   const Elf_Shdr *sechdrs,
3508			   struct module *me)
3509{
3510	return 0;
3511}
3512
3513static int post_relocation(struct module *mod, const struct load_info *info)
3514{
3515	/* Sort exception table now relocations are done. */
3516	sort_extable(mod->extable, mod->extable + mod->num_exentries);
3517
3518	/* Copy relocated percpu area over. */
3519	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
3520		       info->sechdrs[info->index.pcpu].sh_size);
3521
3522	/* Setup kallsyms-specific fields. */
3523	add_kallsyms(mod, info);
3524
3525	/* Arch-specific module finalizing. */
3526	return module_finalize(info->hdr, info->sechdrs, mod);
3527}
3528
3529/* Has the module with this name finished loading?  Called with no locks held. */
3530static bool finished_loading(const char *name)
3531{
3532	struct module *mod;
3533	bool ret;
3534
3535	/*
3536	 * The module_mutex should not be a heavily contended lock;
3537	 * if we get the occasional sleep here, we'll go an extra iteration
3538	 * in the wait_event_interruptible(), which is harmless.
3539	 */
3540	sched_annotate_sleep();
3541	mutex_lock(&module_mutex);
3542	mod = find_module_all(name, strlen(name), true);
3543	ret = !mod || mod->state == MODULE_STATE_LIVE;
 
3544	mutex_unlock(&module_mutex);
3545
3546	return ret;
3547}
3548
3549/* Call module constructors. */
3550static void do_mod_ctors(struct module *mod)
3551{
3552#ifdef CONFIG_CONSTRUCTORS
3553	unsigned long i;
3554
3555	for (i = 0; i < mod->num_ctors; i++)
3556		mod->ctors[i]();
3557#endif
3558}
3559
3560/* For freeing module_init on success, in case kallsyms is still traversing it */
3561struct mod_initfree {
3562	struct llist_node node;
3563	void *module_init;
3564};
3565
3566static void do_free_init(struct work_struct *w)
3567{
3568	struct llist_node *pos, *n, *list;
3569	struct mod_initfree *initfree;
3570
3571	list = llist_del_all(&init_free_list);
3572
3573	synchronize_rcu();
3574
3575	llist_for_each_safe(pos, n, list) {
3576		initfree = container_of(pos, struct mod_initfree, node);
3577		module_memfree(initfree->module_init);
3578		kfree(initfree);
3579	}
3580}
3581
3582static int __init modules_wq_init(void)
3583{
3584	INIT_WORK(&init_free_wq, do_free_init);
3585	init_llist_head(&init_free_list);
3586	return 0;
3587}
3588module_init(modules_wq_init);
3589
3590/*
3591 * This is where the real work happens.
3592 *
3593 * Keep it uninlined to provide a reliable breakpoint target, e.g. for the gdb
3594 * helper command 'lx-symbols'.
3595 */
3596static noinline int do_init_module(struct module *mod)
3597{
3598	int ret = 0;
3599	struct mod_initfree *freeinit;
3600
3601	freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
3602	if (!freeinit) {
3603		ret = -ENOMEM;
3604		goto fail;
3605	}
3606	freeinit->module_init = mod->init_layout.base;
3607
3608	/*
3609	 * We want to find out whether @mod uses async during init.  Clear
3610	 * PF_USED_ASYNC.  async_schedule*() will set it.
3611	 */
3612	current->flags &= ~PF_USED_ASYNC;
3613
3614	do_mod_ctors(mod);
3615	/* Start the module */
3616	if (mod->init != NULL)
3617		ret = do_one_initcall(mod->init);
3618	if (ret < 0) {
3619		goto fail_free_freeinit;
3620	}
3621	if (ret > 0) {
3622		pr_warn("%s: '%s'->init suspiciously returned %d, it should "
3623			"follow 0/-E convention\n"
3624			"%s: loading module anyway...\n",
3625			__func__, mod->name, ret, __func__);
3626		dump_stack();
3627	}
3628
3629	/* Now it's a first class citizen! */
3630	mod->state = MODULE_STATE_LIVE;
3631	blocking_notifier_call_chain(&module_notify_list,
3632				     MODULE_STATE_LIVE, mod);
3633
3634	/*
3635	 * We need to finish all async code before the module init sequence
3636	 * is done.  This has potential to deadlock.  For example, a newly
3637	 * detected block device can trigger request_module() of the
3638	 * default iosched from async probing task.  Once userland helper
3639	 * reaches here, async_synchronize_full() will wait on the async
3640	 * task waiting on request_module() and deadlock.
3641	 *
3642	 * This deadlock is avoided by performing async_synchronize_full()
3643	 * iff module init queued any async jobs.  This isn't a full
3644	 * solution as it will deadlock the same if module loading from
3645	 * async jobs nests more than once; however, due to the various
3646	 * constraints, this hack seems to be the best option for now.
3647	 * Please refer to the following thread for details.
3648	 *
3649	 * http://thread.gmane.org/gmane.linux.kernel/1420814
3650	 */
3651	if (!mod->async_probe_requested && (current->flags & PF_USED_ASYNC))
3652		async_synchronize_full();
3653
3654	ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
3655			mod->init_layout.size);
3656	mutex_lock(&module_mutex);
3657	/* Drop initial reference. */
3658	module_put(mod);
3659	trim_init_extable(mod);
3660#ifdef CONFIG_KALLSYMS
3661	/* Switch to core kallsyms now init is done: kallsyms may be walking! */
3662	rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
3663#endif
3664	module_enable_ro(mod, true);
3665	mod_tree_remove_init(mod);
 
3666	module_arch_freeing_init(mod);
3667	mod->init_layout.base = NULL;
3668	mod->init_layout.size = 0;
3669	mod->init_layout.ro_size = 0;
3670	mod->init_layout.ro_after_init_size = 0;
3671	mod->init_layout.text_size = 0;
3672	/*
3673	 * We want to free module_init, but be aware that kallsyms may be
3674	 * walking this with preempt disabled.  In all the failure paths, we
3675	 * call synchronize_rcu(), but we don't want to slow down the success
3676	 * path. module_memfree() cannot be called in an interrupt, so do the
3677	 * work and call synchronize_rcu() in a work queue.
3678	 *
3679	 * Note that module_alloc() on most architectures creates W+X page
3680	 * mappings which won't be cleaned up until do_free_init() runs.  Any
3681	 * code such as mark_rodata_ro() which depends on those mappings to
3682	 * be cleaned up needs to sync with the queued work, i.e. by calling
3683	 * rcu_barrier().
3684	 */
3685	if (llist_add(&freeinit->node, &init_free_list))
3686		schedule_work(&init_free_wq);
3687
3688	mutex_unlock(&module_mutex);
3689	wake_up_all(&module_wq);
3690
3691	return 0;
3692
3693fail_free_freeinit:
3694	kfree(freeinit);
3695fail:
3696	/* Try to protect us from buggy refcounters. */
3697	mod->state = MODULE_STATE_GOING;
3698	synchronize_rcu();
3699	module_put(mod);
3700	blocking_notifier_call_chain(&module_notify_list,
3701				     MODULE_STATE_GOING, mod);
3702	klp_module_going(mod);
3703	ftrace_release_mod(mod);
3704	free_module(mod);
3705	wake_up_all(&module_wq);
3706	return ret;
3707}
3708
3709static int may_init_module(void)
3710{
3711	if (!capable(CAP_SYS_MODULE) || modules_disabled)
3712		return -EPERM;
3713
3714	return 0;
3715}
3716
3717/*
3718 * We try to place it in the list now to make sure it's unique before
3719 * we dedicate too many resources to it; in particular, this avoids
3720 * exhausting temporary percpu memory.
3721 */
3722static int add_unformed_module(struct module *mod)
3723{
3724	int err;
3725	struct module *old;
3726
3727	mod->state = MODULE_STATE_UNFORMED;
3728
3729again:
3730	mutex_lock(&module_mutex);
3731	old = find_module_all(mod->name, strlen(mod->name), true);
3732	if (old != NULL) {
3733		if (old->state != MODULE_STATE_LIVE) {
 
3734			/* Wait in case it fails to load. */
3735			mutex_unlock(&module_mutex);
3736			err = wait_event_interruptible(module_wq,
3737					       finished_loading(mod->name));
3738			if (err)
3739				goto out_unlocked;
3740			goto again;
3741		}
3742		err = -EEXIST;
3743		goto out;
3744	}
3745	mod_update_bounds(mod);
3746	list_add_rcu(&mod->list, &modules);
3747	mod_tree_insert(mod);
3748	err = 0;
3749
3750out:
3751	mutex_unlock(&module_mutex);
3752out_unlocked:
3753	return err;
3754}
3755
3756static int complete_formation(struct module *mod, struct load_info *info)
3757{
3758	int err;
3759
3760	mutex_lock(&module_mutex);
3761
3762	/* Find duplicate symbols (must be called under lock). */
3763	err = verify_exported_symbols(mod);
3764	if (err < 0)
3765		goto out;
3766
3767	/* This relies on module_mutex for list integrity. */
3768	module_bug_finalize(info->hdr, info->sechdrs, mod);
3769
3770	module_enable_ro(mod, false);
3771	module_enable_nx(mod);
3772	module_enable_x(mod);
3773
3774	/* Mark state as coming so strong_try_module_get() ignores us,
3775	 * but kallsyms etc. can see us. */
3776	mod->state = MODULE_STATE_COMING;
3777	mutex_unlock(&module_mutex);
3778
3779	return 0;
3780
3781out:
3782	mutex_unlock(&module_mutex);
3783	return err;
3784}
3785
3786static int prepare_coming_module(struct module *mod)
3787{
3788	int err;
3789
3790	ftrace_module_enable(mod);
3791	err = klp_module_coming(mod);
3792	if (err)
3793		return err;
3794
3795	blocking_notifier_call_chain(&module_notify_list,
3796				     MODULE_STATE_COMING, mod);
3797	return 0;
3798}
3799
3800static int unknown_module_param_cb(char *param, char *val, const char *modname,
3801				   void *arg)
3802{
3803	struct module *mod = arg;
3804	int ret;
3805
3806	if (strcmp(param, "async_probe") == 0) {
3807		mod->async_probe_requested = true;
3808		return 0;
3809	}
3810
3811	/* Check for magic 'dyndbg' arg */
3812	ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3813	if (ret != 0)
3814		pr_warn("%s: unknown parameter '%s' ignored\n", modname, param);
3815	return 0;
3816}
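/*
 * Example: loading a module with the parameter "async_probe" is not treated
 * as an unknown parameter; it only sets ->async_probe_requested, which lets
 * do_init_module() skip the async_synchronize_full() wait.  Likewise,
 * "dyndbg" queries are consumed by ddebug_dyndbg_module_param_cb() rather
 * than warned about.
 */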
3817
3818/* Allocate and load the module: note that the size of section 0 is
3819   always zero, and we rely on this for optional sections. */
3820static int load_module(struct load_info *info, const char __user *uargs,
3821		       int flags)
3822{
3823	struct module *mod;
3824	long err = 0;
3825	char *after_dashes;
3826
3827	err = elf_header_check(info);
3828	if (err)
3829		goto free_copy;
3830
3831	err = setup_load_info(info, flags);
3832	if (err)
3833		goto free_copy;
3834
3835	if (blacklisted(info->name)) {
3836		err = -EPERM;
3837		goto free_copy;
3838	}
3839
3840	err = module_sig_check(info, flags);
3841	if (err)
3842		goto free_copy;
3843
3844	err = rewrite_section_headers(info, flags);
3845	if (err)
3846		goto free_copy;
3847
3848	/* Check module struct version now, before we try to use module. */
3849	if (!check_modstruct_version(info, info->mod)) {
3850		err = -ENOEXEC;
3851		goto free_copy;
3852	}
3853
3854	/* Figure out module layout, and allocate all the memory. */
3855	mod = layout_and_allocate(info, flags);
3856	if (IS_ERR(mod)) {
3857		err = PTR_ERR(mod);
3858		goto free_copy;
3859	}
3860
3861	audit_log_kern_module(mod->name);
3862
3863	/* Reserve our place in the list. */
3864	err = add_unformed_module(mod);
3865	if (err)
3866		goto free_module;
3867
3868#ifdef CONFIG_MODULE_SIG
3869	mod->sig_ok = info->sig_ok;
3870	if (!mod->sig_ok) {
3871		pr_notice_once("%s: module verification failed: signature "
3872			       "and/or required key missing - tainting "
3873			       "kernel\n", mod->name);
3874		add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
3875	}
3876#endif
3877
3878	/* To avoid stressing percpu allocator, do this once we're unique. */
3879	err = percpu_modalloc(mod, info);
3880	if (err)
3881		goto unlink_mod;
3882
3883	/* Now module is in final location, initialize linked lists, etc. */
3884	err = module_unload_init(mod);
3885	if (err)
3886		goto unlink_mod;
3887
3888	init_param_lock(mod);
3889
3890	/* Now we've got everything in the final locations, so we can
3891	 * find the optional sections. */
3892	err = find_module_sections(mod, info);
3893	if (err)
3894		goto free_unload;
3895
3896	err = check_module_license_and_versions(mod);
3897	if (err)
3898		goto free_unload;
3899
3900	/* Set up MODINFO_ATTR fields */
3901	setup_modinfo(mod, info);
3902
3903	/* Fix up syms, so that st_value is a pointer to location. */
3904	err = simplify_symbols(mod, info);
3905	if (err < 0)
3906		goto free_modinfo;
3907
3908	err = apply_relocations(mod, info);
3909	if (err < 0)
3910		goto free_modinfo;
3911
3912	err = post_relocation(mod, info);
3913	if (err < 0)
3914		goto free_modinfo;
3915
3916	flush_module_icache(mod);
3917
3918	/* Now copy in args */
3919	mod->args = strndup_user(uargs, ~0UL >> 1);
3920	if (IS_ERR(mod->args)) {
3921		err = PTR_ERR(mod->args);
3922		goto free_arch_cleanup;
3923	}
3924
3925	dynamic_debug_setup(mod, info->debug, info->num_debug);
3926
3927	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
3928	ftrace_module_init(mod);
3929
3930	/* Finally it's fully formed, ready to start executing. */
3931	err = complete_formation(mod, info);
3932	if (err)
3933		goto ddebug_cleanup;
3934
3935	err = prepare_coming_module(mod);
3936	if (err)
3937		goto bug_cleanup;
3938
3939	/* Module is ready to execute: parsing args may do that. */
3940	after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3941				  -32768, 32767, mod,
3942				  unknown_module_param_cb);
3943	if (IS_ERR(after_dashes)) {
3944		err = PTR_ERR(after_dashes);
3945		goto coming_cleanup;
3946	} else if (after_dashes) {
3947		pr_warn("%s: parameters '%s' after `--' ignored\n",
3948		       mod->name, after_dashes);
3949	}
3950
3951	/* Link in to sysfs. */
3952	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3953	if (err < 0)
3954		goto coming_cleanup;
3955
3956	if (is_livepatch_module(mod)) {
3957		err = copy_module_elf(mod, info);
3958		if (err < 0)
3959			goto sysfs_cleanup;
3960	}
3961
3962	/* Get rid of temporary copy. */
3963	free_copy(info);
3964
3965	/* Done! */
3966	trace_module_load(mod);
3967
3968	return do_init_module(mod);
3969
3970 sysfs_cleanup:
3971	mod_sysfs_teardown(mod);
3972 coming_cleanup:
3973	mod->state = MODULE_STATE_GOING;
3974	destroy_params(mod->kp, mod->num_kp);
3975	blocking_notifier_call_chain(&module_notify_list,
3976				     MODULE_STATE_GOING, mod);
3977	klp_module_going(mod);
3978 bug_cleanup:
3979	/* module_bug_cleanup needs module_mutex protection */
3980	mutex_lock(&module_mutex);
3981	module_bug_cleanup(mod);
3982	mutex_unlock(&module_mutex);
3983
3984 ddebug_cleanup:
3985	ftrace_release_mod(mod);
3986	dynamic_debug_remove(mod, info->debug);
3987	synchronize_rcu();
3988	kfree(mod->args);
3989 free_arch_cleanup:
3990	module_arch_cleanup(mod);
3991 free_modinfo:
3992	free_modinfo(mod);
3993 free_unload:
3994	module_unload_free(mod);
3995 unlink_mod:
3996	mutex_lock(&module_mutex);
3997	/* Unlink carefully: kallsyms could be walking list. */
3998	list_del_rcu(&mod->list);
3999	mod_tree_remove(mod);
4000	wake_up_all(&module_wq);
4001	/* Wait for RCU-sched synchronizing before releasing mod->list. */
4002	synchronize_rcu();
4003	mutex_unlock(&module_mutex);
4004 free_module:
4005	/* Free lock-classes; relies on the preceding sync_rcu() */
4006	lockdep_free_key_range(mod->core_layout.base, mod->core_layout.size);
4007
4008	module_deallocate(mod, info);
4009 free_copy:
4010	free_copy(info);
4011	return err;
4012}
4013
4014SYSCALL_DEFINE3(init_module, void __user *, umod,
4015		unsigned long, len, const char __user *, uargs)
4016{
4017	int err;
4018	struct load_info info = { };
4019
4020	err = may_init_module();
4021	if (err)
4022		return err;
4023
4024	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
4025	       umod, len, uargs);
4026
4027	err = copy_module_from_user(umod, len, &info);
4028	if (err)
4029		return err;
4030
4031	return load_module(&info, uargs, 0);
4032}
4033
4034SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
4035{
4036	struct load_info info = { };
4037	loff_t size;
4038	void *hdr;
4039	int err;
4040
4041	err = may_init_module();
4042	if (err)
4043		return err;
4044
4045	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
4046
4047	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
4048		      |MODULE_INIT_IGNORE_VERMAGIC))
4049		return -EINVAL;
4050
4051	err = kernel_read_file_from_fd(fd, &hdr, &size, INT_MAX,
4052				       READING_MODULE);
4053	if (err)
4054		return err;
4055	info.hdr = hdr;
4056	info.len = size;
4057
4058	return load_module(&info, uargs, flags);
4059}
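/*
 * The two MODULE_INIT_* flags accepted above implement force loading (what
 * "modprobe --force" requests): MODULE_INIT_IGNORE_VERMAGIC makes
 * check_modinfo() ignore the module's vermagic string, and
 * MODULE_INIT_IGNORE_MODVERSIONS makes setup_load_info() behave as if there
 * were no __versions section.
 */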
4060
4061static inline int within(unsigned long addr, void *start, unsigned long size)
4062{
4063	return ((void *)addr >= start && (void *)addr < start + size);
4064}
4065
4066#ifdef CONFIG_KALLSYMS
4067/*
4068 * This ignores the intensely annoying "mapping symbols" found
4069 * in ARM ELF files: $a, $t and $d.
4070 */
4071static inline int is_arm_mapping_symbol(const char *str)
4072{
4073	if (str[0] == '.' && str[1] == 'L')
4074		return true;
4075	return str[0] == '$' && strchr("axtd", str[1])
4076	       && (str[2] == '\0' || str[2] == '.');
4077}
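/* e.g. "$a", "$d.42" and compiler-generated ".L" local labels are all skipped. */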
4078
4079static const char *kallsyms_symbol_name(struct mod_kallsyms *kallsyms, unsigned int symnum)
4080{
4081	return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
4082}
4083
4084/*
4085 * Given a module and address, find the corresponding symbol and return its name
4086 * while providing its size and offset if needed.
4087 */
4088static const char *find_kallsyms_symbol(struct module *mod,
4089					unsigned long addr,
4090					unsigned long *size,
4091					unsigned long *offset)
4092{
4093	unsigned int i, best = 0;
4094	unsigned long nextval, bestval;
4095	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
4096
4097	/* At worst, the next value is at the end of the module */
4098	if (within_module_init(addr, mod))
4099		nextval = (unsigned long)mod->init_layout.base+mod->init_layout.text_size;
4100	else
4101		nextval = (unsigned long)mod->core_layout.base+mod->core_layout.text_size;
4102
4103	bestval = kallsyms_symbol_value(&kallsyms->symtab[best]);
4104
4105	/* Scan for closest preceding symbol, and next symbol. (ELF
4106	   starts real symbols at 1). */
4107	for (i = 1; i < kallsyms->num_symtab; i++) {
4108		const Elf_Sym *sym = &kallsyms->symtab[i];
4109		unsigned long thisval = kallsyms_symbol_value(sym);
4110
4111		if (sym->st_shndx == SHN_UNDEF)
4112			continue;
4113
4114		/* We ignore unnamed symbols: they're uninformative
4115		 * and inserted at a whim. */
4116		if (*kallsyms_symbol_name(kallsyms, i) == '\0'
4117		    || is_arm_mapping_symbol(kallsyms_symbol_name(kallsyms, i)))
4118			continue;
4119
4120		if (thisval <= addr && thisval > bestval) {
 
4121			best = i;
4122			bestval = thisval;
4123		}
4124		if (thisval > addr && thisval < nextval)
4125			nextval = thisval;
4126	}
4127
4128	if (!best)
4129		return NULL;
4130
4131	if (size)
4132		*size = nextval - bestval;
4133	if (offset)
4134		*offset = addr - bestval;
4135
4136	return kallsyms_symbol_name(kallsyms, best);
4137}
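/*
 * Illustrative example: with a symbol "foo" at 0x100 and "bar" at 0x180 in a
 * module's text, an addr of 0x150 resolves to "foo" with *offset = 0x50 and
 * *size = 0x80 (the distance to the next symbol, or to the end of the text
 * region when no symbol follows).
 */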
4138
4139void * __weak dereference_module_function_descriptor(struct module *mod,
4140						     void *ptr)
4141{
4142	return ptr;
4143}
4144
4145/* For kallsyms to ask for address resolution.  NULL means not found.  We must
4146 * not take locks (to avoid deadlock on oopses), so simply disable preemption. */
4147const char *module_address_lookup(unsigned long addr,
4148			    unsigned long *size,
4149			    unsigned long *offset,
4150			    char **modname,
4151			    char *namebuf)
4152{
4153	const char *ret = NULL;
4154	struct module *mod;
4155
4156	preempt_disable();
4157	mod = __module_address(addr);
4158	if (mod) {
4159		if (modname)
4160			*modname = mod->name;
4161
4162		ret = find_kallsyms_symbol(mod, addr, size, offset);
4163	}
4164	/* Make a copy in here where it's safe */
4165	if (ret) {
4166		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
4167		ret = namebuf;
4168	}
4169	preempt_enable();
4170
4171	return ret;
4172}
4173
4174int lookup_module_symbol_name(unsigned long addr, char *symname)
4175{
4176	struct module *mod;
4177
4178	preempt_disable();
4179	list_for_each_entry_rcu(mod, &modules, list) {
4180		if (mod->state == MODULE_STATE_UNFORMED)
4181			continue;
4182		if (within_module(addr, mod)) {
4183			const char *sym;
4184
4185			sym = find_kallsyms_symbol(mod, addr, NULL, NULL);
4186			if (!sym)
4187				goto out;
4188
4189			strlcpy(symname, sym, KSYM_NAME_LEN);
4190			preempt_enable();
4191			return 0;
4192		}
4193	}
4194out:
4195	preempt_enable();
4196	return -ERANGE;
4197}
4198
4199int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
4200			unsigned long *offset, char *modname, char *name)
4201{
4202	struct module *mod;
4203
4204	preempt_disable();
4205	list_for_each_entry_rcu(mod, &modules, list) {
4206		if (mod->state == MODULE_STATE_UNFORMED)
4207			continue;
4208		if (within_module(addr, mod)) {
4209			const char *sym;
4210
4211			sym = find_kallsyms_symbol(mod, addr, size, offset);
4212			if (!sym)
4213				goto out;
4214			if (modname)
4215				strlcpy(modname, mod->name, MODULE_NAME_LEN);
4216			if (name)
4217				strlcpy(name, sym, KSYM_NAME_LEN);
4218			preempt_enable();
4219			return 0;
4220		}
4221	}
4222out:
4223	preempt_enable();
4224	return -ERANGE;
4225}
4226
4227int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
4228			char *name, char *module_name, int *exported)
4229{
4230	struct module *mod;
4231
4232	preempt_disable();
4233	list_for_each_entry_rcu(mod, &modules, list) {
4234		struct mod_kallsyms *kallsyms;
4235
4236		if (mod->state == MODULE_STATE_UNFORMED)
4237			continue;
4238		kallsyms = rcu_dereference_sched(mod->kallsyms);
4239		if (symnum < kallsyms->num_symtab) {
4240			const Elf_Sym *sym = &kallsyms->symtab[symnum];
4241
4242			*value = kallsyms_symbol_value(sym);
4243			*type = kallsyms->typetab[symnum];
4244			strlcpy(name, kallsyms_symbol_name(kallsyms, symnum), KSYM_NAME_LEN);
4245			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
4246			*exported = is_exported(name, *value, mod);
4247			preempt_enable();
4248			return 0;
4249		}
4250		symnum -= kallsyms->num_symtab;
4251	}
4252	preempt_enable();
4253	return -ERANGE;
4254}
4255
4256/* Given a module and name of symbol, find and return the symbol's value */
4257static unsigned long find_kallsyms_symbol_value(struct module *mod, const char *name)
4258{
4259	unsigned int i;
4260	struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
4261
4262	for (i = 0; i < kallsyms->num_symtab; i++) {
4263		const Elf_Sym *sym = &kallsyms->symtab[i];
4264
4265		if (strcmp(name, kallsyms_symbol_name(kallsyms, i)) == 0 &&
4266		    sym->st_shndx != SHN_UNDEF)
4267			return kallsyms_symbol_value(sym);
4268	}
4269	return 0;
4270}
4271
4272/* Look for this name: can be of form module:name. */
4273unsigned long module_kallsyms_lookup_name(const char *name)
4274{
4275	struct module *mod;
4276	char *colon;
4277	unsigned long ret = 0;
4278
4279	/* Don't lock: we're in enough trouble already. */
4280	preempt_disable();
4281	if ((colon = strnchr(name, MODULE_NAME_LEN, ':')) != NULL) {
4282		if ((mod = find_module_all(name, colon - name, false)) != NULL)
4283			ret = find_kallsyms_symbol_value(mod, colon+1);
4284	} else {
4285		list_for_each_entry_rcu(mod, &modules, list) {
4286			if (mod->state == MODULE_STATE_UNFORMED)
4287				continue;
4288			if ((ret = find_kallsyms_symbol_value(mod, name)) != 0)
4289				break;
4290		}
4291	}
4292	preempt_enable();
4293	return ret;
4294}
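/*
 * e.g. module_kallsyms_lookup_name("mymod:my_func") searches only "mymod",
 * while a bare "my_func" walks every module on the list (skipping unformed
 * ones).  Both names here are purely illustrative.
 */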
4295
4296int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
4297					     struct module *, unsigned long),
4298				   void *data)
4299{
4300	struct module *mod;
4301	unsigned int i;
4302	int ret;
4303
4304	module_assert_mutex();
4305
4306	list_for_each_entry(mod, &modules, list) {
4307		/* We hold module_mutex: no need for rcu_dereference_sched */
4308		struct mod_kallsyms *kallsyms = mod->kallsyms;
4309
4310		if (mod->state == MODULE_STATE_UNFORMED)
4311			continue;
4312		for (i = 0; i < kallsyms->num_symtab; i++) {
4313			const Elf_Sym *sym = &kallsyms->symtab[i];
4314
4315			if (sym->st_shndx == SHN_UNDEF)
4316				continue;
4317
4318			ret = fn(data, kallsyms_symbol_name(kallsyms, i),
4319				 mod, kallsyms_symbol_value(sym));
4320			if (ret != 0)
4321				return ret;
4322		}
4323	}
4324	return 0;
4325}
4326#endif /* CONFIG_KALLSYMS */
4327
4328/* Maximum number of characters written by module_flags() */
4329#define MODULE_FLAGS_BUF_SIZE (TAINT_FLAGS_COUNT + 4)
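/*
 * i.e. up to one character per taint flag, plus '(', an optional '+' or '-',
 * ')' and the trailing NUL.
 */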
4330
4331/* Keep in sync with MODULE_FLAGS_BUF_SIZE !!! */
4332static char *module_flags(struct module *mod, char *buf)
4333{
4334	int bx = 0;
4335
4336	BUG_ON(mod->state == MODULE_STATE_UNFORMED);
4337	if (mod->taints ||
4338	    mod->state == MODULE_STATE_GOING ||
4339	    mod->state == MODULE_STATE_COMING) {
4340		buf[bx++] = '(';
4341		bx += module_flags_taint(mod, buf + bx);
4342		/* Show a - for module-is-being-unloaded */
4343		if (mod->state == MODULE_STATE_GOING)
4344			buf[bx++] = '-';
4345		/* Show a + for module-is-being-loaded */
4346		if (mod->state == MODULE_STATE_COMING)
4347			buf[bx++] = '+';
4348		buf[bx++] = ')';
4349	}
4350	buf[bx] = '\0';
4351
4352	return buf;
4353}
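/*
 * Illustrative output: a live, untainted module yields an empty string; an
 * out-of-tree, unsigned module that is currently being unloaded would show
 * up as something like "(OE-)".
 */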
4354
4355#ifdef CONFIG_PROC_FS
4356/* Called by the /proc file system to return a list of modules. */
4357static void *m_start(struct seq_file *m, loff_t *pos)
4358{
4359	mutex_lock(&module_mutex);
4360	return seq_list_start(&modules, *pos);
4361}
4362
4363static void *m_next(struct seq_file *m, void *p, loff_t *pos)
4364{
4365	return seq_list_next(p, &modules, pos);
4366}
4367
4368static void m_stop(struct seq_file *m, void *p)
4369{
4370	mutex_unlock(&module_mutex);
4371}
4372
4373static int m_show(struct seq_file *m, void *p)
4374{
4375	struct module *mod = list_entry(p, struct module, list);
4376	char buf[MODULE_FLAGS_BUF_SIZE];
4377	void *value;
4378
4379	/* We always ignore unformed modules. */
4380	if (mod->state == MODULE_STATE_UNFORMED)
4381		return 0;
4382
4383	seq_printf(m, "%s %u",
4384		   mod->name, mod->init_layout.size + mod->core_layout.size);
4385	print_unload_info(m, mod);
4386
4387	/* Informative for users. */
4388	seq_printf(m, " %s",
4389		   mod->state == MODULE_STATE_GOING ? "Unloading" :
4390		   mod->state == MODULE_STATE_COMING ? "Loading" :
4391		   "Live");
4392	/* Used by oprofile and other similar tools. */
4393	value = m->private ? NULL : mod->core_layout.base;
4394	seq_printf(m, " 0x%px", value);
4395
4396	/* Taints info */
4397	if (mod->taints)
4398		seq_printf(m, " %s", module_flags(mod, buf));
4399
4400	seq_puts(m, "\n");
4401	return 0;
4402}
4403
4404/* Format: modulename size refcount deps address
4405
4406   Where refcount is a number or -, and deps is a comma-separated list
4407   of depends or -.
4408*/
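/*
 * An illustrative /proc/modules line (the base address is printed as NULL
 * for readers who may not see kernel pointers):
 *   snd_hda_intel 45056 4 snd_hda_codec, Live 0xffffffffc09e0000
 */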
4409static const struct seq_operations modules_op = {
4410	.start	= m_start,
4411	.next	= m_next,
4412	.stop	= m_stop,
4413	.show	= m_show
4414};
4415
4416/*
4417 * This also sets the "private" pointer to non-NULL if the
4418 * kernel pointers should be hidden (so you can just test
4419 * "m->private" to see if you should keep the values private).
4420 *
4421 * We use the same logic as for /proc/kallsyms.
4422 */
4423static int modules_open(struct inode *inode, struct file *file)
4424{
4425	int err = seq_open(file, &modules_op);
4426
4427	if (!err) {
4428		struct seq_file *m = file->private_data;
4429		m->private = kallsyms_show_value(file->f_cred) ? NULL : (void *)8ul;
4430	}
4431
4432	return err;
4433}
4434
4435static const struct proc_ops modules_proc_ops = {
4436	.proc_flags	= PROC_ENTRY_PERMANENT,
4437	.proc_open	= modules_open,
4438	.proc_read	= seq_read,
4439	.proc_lseek	= seq_lseek,
4440	.proc_release	= seq_release,
4441};
4442
4443static int __init proc_modules_init(void)
4444{
4445	proc_create("modules", 0, NULL, &modules_proc_ops);
4446	return 0;
4447}
4448module_init(proc_modules_init);
4449#endif
4450
4451/* Given an address, look for it in the module exception tables. */
4452const struct exception_table_entry *search_module_extables(unsigned long addr)
4453{
4454	const struct exception_table_entry *e = NULL;
4455	struct module *mod;
4456
4457	preempt_disable();
4458	mod = __module_address(addr);
4459	if (!mod)
4460		goto out;
4461
4462	if (!mod->num_exentries)
4463		goto out;
4464
4465	e = search_extable(mod->extable,
4466			   mod->num_exentries,
4467			   addr);
4468out:
4469	preempt_enable();
4470
4471	/*
4472	 * If we found one, we are currently running inside it, so the
4473	 * module cannot be unloaded and no refcount is needed.
4474	 */
4475	return e;
4476}
4477
4478/*
4479 * is_module_address - is this address inside a module?
4480 * @addr: the address to check.
4481 *
4482 * See is_module_text_address() if you simply want to see if the address
4483 * is code (not data).
4484 */
4485bool is_module_address(unsigned long addr)
4486{
4487	bool ret;
4488
4489	preempt_disable();
4490	ret = __module_address(addr) != NULL;
4491	preempt_enable();
4492
4493	return ret;
4494}
4495
4496/*
4497 * __module_address - get the module which contains an address.
4498 * @addr: the address.
4499 *
4500 * Must be called with preempt disabled or module mutex held so that
4501 * module doesn't get freed during this.
4502 */
4503struct module *__module_address(unsigned long addr)
4504{
4505	struct module *mod;
4506
4507	if (addr < module_addr_min || addr > module_addr_max)
4508		return NULL;
4509
4510	module_assert_mutex_or_preempt();
4511
4512	mod = mod_find(addr);
4513	if (mod) {
4514		BUG_ON(!within_module(addr, mod));
4515		if (mod->state == MODULE_STATE_UNFORMED)
4516			mod = NULL;
4517	}
4518	return mod;
4519}
 
4520
4521/*
4522 * is_module_text_address - is this address inside module code?
4523 * @addr: the address to check.
4524 *
4525 * See is_module_address() if you simply want to see if the address is
4526 * anywhere in a module.  See kernel_text_address() for testing if an
4527 * address corresponds to kernel or module code.
4528 */
4529bool is_module_text_address(unsigned long addr)
4530{
4531	bool ret;
4532
4533	preempt_disable();
4534	ret = __module_text_address(addr) != NULL;
4535	preempt_enable();
4536
4537	return ret;
4538}
4539
4540/*
4541 * __module_text_address - get the module whose code contains an address.
4542 * @addr: the address.
4543 *
4544 * Must be called with preempt disabled or module mutex held so that
4545 * module doesn't get freed during this.
4546 */
4547struct module *__module_text_address(unsigned long addr)
4548{
4549	struct module *mod = __module_address(addr);
4550	if (mod) {
4551		/* Make sure it's within the text section. */
4552		if (!within(addr, mod->init_layout.base, mod->init_layout.text_size)
4553		    && !within(addr, mod->core_layout.base, mod->core_layout.text_size))
4554			mod = NULL;
4555	}
4556	return mod;
4557}
 
4558
4559/* Don't grab lock, we're oopsing. */
4560void print_modules(void)
4561{
4562	struct module *mod;
4563	char buf[MODULE_FLAGS_BUF_SIZE];
4564
4565	printk(KERN_DEFAULT "Modules linked in:");
4566	/* Most callers should already have preempt disabled, but make sure */
4567	preempt_disable();
4568	list_for_each_entry_rcu(mod, &modules, list) {
4569		if (mod->state == MODULE_STATE_UNFORMED)
4570			continue;
4571		pr_cont(" %s%s", mod->name, module_flags(mod, buf));
4572	}
4573	preempt_enable();
4574	if (last_unloaded_module[0])
4575		pr_cont(" [last unloaded: %s]", last_unloaded_module);
4576	pr_cont("\n");
4577}
4578
4579#ifdef CONFIG_MODVERSIONS
4580/* Generate the signature for all relevant module structures here.
4581 * If these change, we don't want to try to parse the module. */
4582void module_layout(struct module *mod,
4583		   struct modversion_info *ver,
4584		   struct kernel_param *kp,
4585		   struct kernel_symbol *ks,
4586		   struct tracepoint * const *tp)
4587{
4588}
4589EXPORT_SYMBOL(module_layout);
4590#endif