Merge branch 'akpm' (patches from Andrew)
Merge misc updates from Andrew Morton:

 - kasan updates
 - procfs
 - lib/bitmap updates
 - other lib/ updates
 - checkpatch tweaks
 - rapidio
 - ubsan
 - pipe fixes and cleanups
 - lots of other misc bits

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (114 commits)
  Documentation/sysctl/user.txt: fix typo
  MAINTAINERS: update ARM/QUALCOMM SUPPORT patterns
  MAINTAINERS: update various PALM patterns
  MAINTAINERS: update "ARM/OXNAS platform support" patterns
  MAINTAINERS: update Cortina/Gemini patterns
  MAINTAINERS: remove ARM/CLKDEV SUPPORT file pattern
  MAINTAINERS: remove ANDROID ION pattern
  mm: docs: add blank lines to silence sphinx "Unexpected indentation" errors
  mm: docs: fix parameter names mismatch
  mm: docs: fixup punctuation
  pipe: read buffer limits atomically
  pipe: simplify round_pipe_size()
  pipe: reject F_SETPIPE_SZ with size over UINT_MAX
  pipe: fix off-by-one error when checking buffer limits
  pipe: actually allow root to exceed the pipe buffer limits
  pipe, sysctl: remove pipe_proc_fn()
  pipe, sysctl: drop 'min' parameter from pipe-max-size converter
  kasan: rework Kconfig settings
  crash_dump: is_kdump_kernel can be boolean
  kernel/mutex: mutex_is_locked can be boolean
  ...
commit a2e5790d84
Documentation/sysctl/user.txt
@@ -3,7 +3,7 @@ Documentation for /proc/sys/user/* kernel version 4.9.0
 
 ==============================================================
 
-This file contains the documetation for the sysctl files in
+This file contains the documentation for the sysctl files in
 /proc/sys/user.
 
 The files in this directory can be used to override the default
MAINTAINERS | 29
@@ -903,7 +903,6 @@ L: devel@driverdev.osuosl.org
 S: Supported
-F: drivers/staging/android/ion
 F: drivers/staging/android/uapi/ion.h
 F: drivers/staging/android/uapi/ion_test.h
 
 AOA (Apple Onboard Audio) ALSA DRIVER
 M: Johannes Berg <johannes@sipsolutions.net>
@@ -1308,7 +1307,6 @@ M: Russell King <linux@armlinux.org.uk>
 L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S: Maintained
 T: git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
-F: arch/arm/include/asm/clkdev.h
 F: drivers/clk/clkdev.c
 
 ARM/COMPULAB CM-X270/EM-X270 and CM-X300 MACHINE SUPPORT
@@ -1360,7 +1358,7 @@ F: Documentation/devicetree/bindings/pinctrl/cortina,gemini-pinctrl.txt
 F: Documentation/devicetree/bindings/net/cortina,gemini-ethernet.txt
 F: Documentation/devicetree/bindings/rtc/faraday,ftrtc010.txt
 F: arch/arm/mach-gemini/
-F: drivers/net/ethernet/cortina/gemini/*
+F: drivers/net/ethernet/cortina/
 F: drivers/pinctrl/pinctrl-gemini.c
 F: drivers/rtc/rtc-ftrtc010.c
 
@@ -1737,9 +1735,7 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L: linux-oxnas@lists.tuxfamily.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-oxnas/
-F: arch/arm/boot/dts/ox8*.dtsi
-F: arch/arm/boot/dts/wd-mbwe.dts
-F: arch/arm/boot/dts/cloudengines-pogoplug-series-3.dts
+F: arch/arm/boot/dts/ox8*.dts*
 N: oxnas
 
 ARM/PALM TREO SUPPORT
@@ -1747,8 +1743,7 @@ M: Tomas Cech <sleep_walker@suse.com>
 L: linux-arm-kernel@lists.infradead.org
 W: http://hackndev.com
 S: Maintained
-F: arch/arm/mach-pxa/include/mach/palmtreo.h
-F: arch/arm/mach-pxa/palmtreo.c
+F: arch/arm/mach-pxa/palmtreo.*
 
 ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
 M: Marek Vasut <marek.vasut@gmail.com>
@@ -1757,12 +1752,10 @@ W: http://hackndev.com
 S: Maintained
 F: arch/arm/mach-pxa/include/mach/palmtx.h
 F: arch/arm/mach-pxa/palmtx.c
-F: arch/arm/mach-pxa/include/mach/palmt5.h
-F: arch/arm/mach-pxa/palmt5.c
+F: arch/arm/mach-pxa/palmt5.*
 F: arch/arm/mach-pxa/include/mach/palmld.h
 F: arch/arm/mach-pxa/palmld.c
-F: arch/arm/mach-pxa/include/mach/palmte2.h
-F: arch/arm/mach-pxa/palmte2.c
+F: arch/arm/mach-pxa/palmte2.*
 F: arch/arm/mach-pxa/include/mach/palmtc.h
 F: arch/arm/mach-pxa/palmtc.c
 
@@ -1771,8 +1764,7 @@ M: Sergey Lapin <slapin@ossfans.org>
 L: linux-arm-kernel@lists.infradead.org
 W: http://hackndev.com
 S: Maintained
-F: arch/arm/mach-pxa/include/mach/palmz72.h
-F: arch/arm/mach-pxa/palmz72.c
+F: arch/arm/mach-pxa/palmz72.*
 
 ARM/PLEB SUPPORT
 M: Peter Chubb <pleb@gelato.unsw.edu.au>
@@ -1801,7 +1793,6 @@ F: drivers/clk/qcom/
 F: drivers/dma/qcom/
 F: drivers/soc/qcom/
 F: drivers/spi/spi-qup.c
-F: drivers/tty/serial/msm_serial.h
 F: drivers/tty/serial/msm_serial.c
 F: drivers/*/pm8???-*
 F: drivers/mfd/ssbi.c
@@ -3567,7 +3558,7 @@ F: drivers/media/platform/coda/
 
 COMMON CLK FRAMEWORK
 M: Michael Turquette <mturquette@baylibre.com>
-M: Stephen Boyd <sboyd@codeaurora.org>
+M: Stephen Boyd <sboyd@kernel.org>
 L: linux-clk@vger.kernel.org
 Q: http://patchwork.kernel.org/project/linux-clk/list/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
@@ -10284,7 +10275,7 @@ F: include/uapi/linux/openvswitch.h
 OPERATING PERFORMANCE POINTS (OPP)
 M: Viresh Kumar <vireshk@kernel.org>
 M: Nishanth Menon <nm@ti.com>
-M: Stephen Boyd <sboyd@codeaurora.org>
+M: Stephen Boyd <sboyd@kernel.org>
 L: linux-pm@vger.kernel.org
 S: Maintained
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/vireshk/pm.git
@@ -13020,7 +13011,7 @@ F: Documentation/networking/spider_net.txt
 F: drivers/net/ethernet/toshiba/spider_net*
 
 SPMI SUBSYSTEM
-R: Stephen Boyd <sboyd@codeaurora.org>
+R: Stephen Boyd <sboyd@kernel.org>
 L: linux-arm-msm@vger.kernel.org
 F: Documentation/devicetree/bindings/spmi/
 F: drivers/spmi/
@@ -13905,7 +13896,7 @@ F: include/linux/usb/tilegx.h
 TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
 M: John Stultz <john.stultz@linaro.org>
 M: Thomas Gleixner <tglx@linutronix.de>
-R: Stephen Boyd <sboyd@codeaurora.org>
+R: Stephen Boyd <sboyd@kernel.org>
 L: linux-kernel@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
 S: Supported
Makefile | 54
@@ -434,7 +434,8 @@ export MAKE LEX YACC AWK GENKSYMS INSTALLKERNEL PERL PYTHON UTS_MACHINE
 export HOSTCXX HOSTCXXFLAGS LDFLAGS_MODULE CHECK CHECKFLAGS
 
 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS LDFLAGS
-export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE CFLAGS_KASAN CFLAGS_UBSAN
+export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE CFLAGS_UBSAN
 export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
 export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
 export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
@@ -679,6 +680,10 @@ endif
 # This selects the stack protector compiler flag. Testing it is delayed
 # until after .config has been reprocessed, in the prepare-compiler-check
 # target.
+ifdef CONFIG_CC_STACKPROTECTOR_AUTO
+  stackp-flag := $(call cc-option,-fstack-protector-strong,$(call cc-option,-fstack-protector))
+  stackp-name := AUTO
+else
 ifdef CONFIG_CC_STACKPROTECTOR_REGULAR
   stackp-flag := -fstack-protector
   stackp-name := REGULAR
@@ -687,16 +692,40 @@ ifdef CONFIG_CC_STACKPROTECTOR_STRONG
   stackp-flag := -fstack-protector-strong
   stackp-name := STRONG
 else
+  # If either there is no stack protector for this architecture or
+  # CONFIG_CC_STACKPROTECTOR_NONE is selected, we're done, and $(stackp-name)
+  # is empty, skipping all remaining stack protector tests.
+  #
   # Force off for distro compilers that enable stack protector by default.
-  stackp-flag := $(call cc-option, -fno-stack-protector)
+  KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
 endif
 endif
+endif
 # Find arch-specific stack protector compiler sanity-checking script.
-ifdef CONFIG_CC_STACKPROTECTOR
+ifdef stackp-name
+ifneq ($(stackp-flag),)
   stackp-path := $(srctree)/scripts/gcc-$(SRCARCH)_$(BITS)-has-stack-protector.sh
   stackp-check := $(wildcard $(stackp-path))
+  # If the wildcard test matches a test script, run it to check functionality.
+  ifdef stackp-check
+    ifneq ($(shell $(CONFIG_SHELL) $(stackp-check) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
+      stackp-broken := y
+    endif
+  endif
+  ifndef stackp-broken
+    # If the stack protector is functional, enable code that depends on it.
+    KBUILD_CPPFLAGS += -DCONFIG_CC_STACKPROTECTOR
+    # Either we've already detected the flag (for AUTO) or we'll fail the
+    # build in the prepare-compiler-check rule (for specific flag).
+    KBUILD_CFLAGS += $(stackp-flag)
+  else
+    # We have to make sure stack protector is unconditionally disabled if
+    # the compiler is broken (in case we're going to continue the build in
+    # AUTO mode).
+    KBUILD_CFLAGS += $(call cc-option, -fno-stack-protector)
+  endif
+endif
 endif
-KBUILD_CFLAGS += $(stackp-flag)
 
 ifeq ($(cc-name),clang)
 KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
@@ -1091,14 +1120,25 @@ PHONY += prepare-compiler-check
 prepare-compiler-check: FORCE
 # Make sure compiler supports requested stack protector flag.
 ifdef stackp-name
+  # Warn about CONFIG_CC_STACKPROTECTOR_AUTO having found no option.
+  ifeq ($(stackp-flag),)
+	@echo CONFIG_CC_STACKPROTECTOR_$(stackp-name): \
+		Compiler does not support any known stack-protector >&2
+  else
+  # Fail if specifically requested stack protector is missing.
   ifeq ($(call cc-option, $(stackp-flag)),)
 	@echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \
 		$(stackp-flag) not supported by compiler >&2 && exit 1
   endif
+  endif
 endif
-# Make sure compiler does not have buggy stack-protector support.
-ifdef stackp-check
-ifneq ($(shell $(CONFIG_SHELL) $(stackp-check) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
+# Make sure compiler does not have buggy stack-protector support. If a
+# specific stack-protector was requested, fail the build, otherwise warn.
+ifdef stackp-broken
+  ifeq ($(stackp-name),AUTO)
+	@echo CONFIG_CC_STACKPROTECTOR_$(stackp-name): \
+		$(stackp-flag) available but compiler is broken: disabling >&2
+  else
 	@echo Cannot use CONFIG_CC_STACKPROTECTOR_$(stackp-name): \
 		$(stackp-flag) available but compiler is broken >&2 && exit 1
   endif
arch/Kconfig | 16
@ -538,16 +538,10 @@ config HAVE_CC_STACKPROTECTOR
|
||||
- its compiler supports the -fstack-protector option
|
||||
- it has implemented a stack canary (e.g. __stack_chk_guard)
|
||||
|
||||
config CC_STACKPROTECTOR
|
||||
def_bool n
|
||||
help
|
||||
Set when a stack-protector mode is enabled, so that the build
|
||||
can enable kernel-side support for the GCC feature.
|
||||
|
||||
choice
|
||||
prompt "Stack Protector buffer overflow detection"
|
||||
depends on HAVE_CC_STACKPROTECTOR
|
||||
default CC_STACKPROTECTOR_NONE
|
||||
default CC_STACKPROTECTOR_AUTO
|
||||
help
|
||||
This option turns on the "stack-protector" GCC feature. This
|
||||
feature puts, at the beginning of functions, a canary value on
|
||||
@ -564,7 +558,6 @@ config CC_STACKPROTECTOR_NONE
|
||||
|
||||
config CC_STACKPROTECTOR_REGULAR
|
||||
bool "Regular"
|
||||
select CC_STACKPROTECTOR
|
||||
help
|
||||
Functions will have the stack-protector canary logic added if they
|
||||
have an 8-byte or larger character array on the stack.
|
||||
@ -578,7 +571,6 @@ config CC_STACKPROTECTOR_REGULAR
|
||||
|
||||
config CC_STACKPROTECTOR_STRONG
|
||||
bool "Strong"
|
||||
select CC_STACKPROTECTOR
|
||||
help
|
||||
Functions will have the stack-protector canary logic added in any
|
||||
of the following conditions:
|
||||
@ -596,6 +588,12 @@ config CC_STACKPROTECTOR_STRONG
|
||||
about 20% of all kernel functions, which increases the kernel code
|
||||
size by about 2%.
|
||||
|
||||
config CC_STACKPROTECTOR_AUTO
|
||||
bool "Automatic"
|
||||
help
|
||||
If the compiler supports it, the best available stack-protector
|
||||
option will be chosen.
|
||||
|
||||
endchoice
|
||||
|
||||
config THIN_ARCHIVES
|
||||
|
@@ -338,6 +338,7 @@ static inline int find_next_bit_le(const void *p, int size, int offset)
 
 #endif
 
+#include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/le.h>
 
 /*
@@ -12,7 +12,8 @@
 
 /*
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
- * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/8 of kernel virtual addresses.
+ * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
+ * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
 */
 #define KASAN_SHADOW_START	(VA_START)
 #define KASAN_SHADOW_END	(KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
@@ -20,14 +21,16 @@
 /*
  * This value is used to map an address to the corresponding shadow
  * address by the following formula:
- *	shadow_addr = (address >> 3) + KASAN_SHADOW_OFFSET;
+ *	shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
- * (1 << 61) shadow addresses - [KASAN_SHADOW_OFFSET,KASAN_SHADOW_END]
- * cover all 64-bits of virtual addresses. So KASAN_SHADOW_OFFSET
- * should satisfy the following equation:
- *	KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - (1ULL << 61)
+ * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
+ * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
+ * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
+ *	KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
+ *				(1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
 */
-#define KASAN_SHADOW_OFFSET	(KASAN_SHADOW_END - (1ULL << (64 - 3)))
+#define KASAN_SHADOW_OFFSET	(KASAN_SHADOW_END - (1ULL << \
+					(64 - KASAN_SHADOW_SCALE_SHIFT)))
 
 void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
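The rewritten comment above documents the address-to-shadow translation in terms of KASAN_SHADOW_SCALE_SHIFT instead of a hard-coded 3. As a quick illustration, this is the translation written out as a helper; a sketch only (the helper name is hypothetical — the kernel implements this as kasan_mem_to_shadow() in the KASAN code), using the constants defined in this header:

	/* Sketch: map a kernel address to its KASAN shadow byte. With
	 * KASAN_SHADOW_SCALE_SHIFT == 3, one shadow byte covers 8 bytes
	 * of real memory, which is why the shadow region occupies 1/8
	 * (more generally 1/N) of the virtual address space.
	 */
	static inline void *kasan_shadow_for(const void *addr)
	{
		return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
				+ KASAN_SHADOW_OFFSET);
	}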
@@ -85,7 +85,8 @@
  * stack size when KASAN is in use.
 */
 #ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - 3))
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#define KASAN_SHADOW_SIZE	(UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
 #define KASAN_THREAD_SHIFT	1
 #else
 #define KASAN_SHADOW_SIZE	(0)
@@ -925,9 +925,8 @@ static void __armv8pmu_probe_pmu(void *info)
 	pmceid[0] = read_sysreg(pmceid0_el0);
 	pmceid[1] = read_sysreg(pmceid1_el0);
 
-	bitmap_from_u32array(cpu_pmu->pmceid_bitmap,
-			     ARMV8_PMUV3_MAX_COMMON_EVENTS, pmceid,
-			     ARRAY_SIZE(pmceid));
+	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
+			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);
 }
 
 static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
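bitmap_from_arr32() takes (dst, src, nbits) and derives the number of source words from nbits, which is why the ARRAY_SIZE() argument disappears in these call sites. A rough sketch of the conversion it performs — illustrative only, not the kernel's implementation, and assuming a zeroed destination and 64-bit unsigned long:

	#include <stdint.h>

	/* Pack an array of u32 words into an unsigned-long-based bitmap;
	 * on 64-bit hosts two u32 halves fold into one unsigned long.
	 */
	static void bitmap_from_arr32_sketch(unsigned long *dst,
					     const uint32_t *src,
					     unsigned int nbits)
	{
		unsigned int i, words = (nbits + 31) / 32;

		for (i = 0; i < words; i++)
			dst[i / 2] |= (unsigned long)src[i] << (32 * (i % 2));
	}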
@@ -135,7 +135,8 @@ static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
 /* The early shadow maps everything to a single page of zeroes */
 asmlinkage void __init kasan_early_init(void)
 {
-	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
+	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
 	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
 	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
@@ -2610,17 +2610,10 @@ pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task)
 	if (pid < 2) return -EPERM;
 
 	if (pid != task_pid_vnr(current)) {
-
-		read_lock(&tasklist_lock);
-
-		p = find_task_by_vpid(pid);
-
-		/* make sure task cannot go away while we operate on it */
-		if (p) get_task_struct(p);
-
-		read_unlock(&tasklist_lock);
-
-		if (p == NULL) return -ESRCH;
+		p = find_get_task_by_vpid(pid);
+		if (!p)
+			return -ESRCH;
 	}
 
 	ret = pfm_task_incompatible(ctx, p);
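find_get_task_by_vpid() exists precisely to replace open-coded lookup-and-reference blocks like the one removed here. Roughly, the helper is equivalent to the following sketch (not the exact kernel code; the mainline version does the lookup under RCU rather than tasklist_lock):

	struct task_struct *find_get_task_by_vpid(pid_t nr)
	{
		struct task_struct *task;

		rcu_read_lock();
		task = find_task_by_vpid(nr);
		if (task)
			get_task_struct(task);	/* pin before dropping RCU */
		rcu_read_unlock();

		return task;
	}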
@@ -311,7 +311,6 @@ static inline int bfchg_mem_test_and_change_bit(int nr,
  *	functions.
 */
 #if defined(CONFIG_CPU_HAS_NO_BITFIELDS)
-#include <asm-generic/bitops/find.h>
 #include <asm-generic/bitops/ffz.h>
 #else
 
@@ -441,6 +440,8 @@ static inline unsigned long ffz(unsigned long word)
 
 #endif
 
+#include <asm-generic/bitops/find.h>
+
 #ifdef __KERNEL__
 
 #if defined(CONFIG_CPU_HAS_NO_BITFIELDS)

@@ -124,9 +124,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 {
 	unsigned long n = (unsigned long) v - 1;
 
-	seq_printf(m, "processor\t\t: %ld\n", n);
-	seq_printf(m, "\n");
-
+	seq_printf(m, "processor\t\t: %ld\n\n", n);
 	return 0;
 }
 

@@ -44,4 +44,6 @@ static inline int fls(int x)
 #define find_first_bit		find_first_bit
 #define find_first_zero_bit	find_first_zero_bit
 
+#include <asm-generic/bitops/find.h>
+
 #endif /* __UNICORE_BITOPS_H__ */

@@ -324,7 +324,7 @@ config X86_64_SMP
 
 config X86_32_LAZY_GS
 	def_bool y
-	depends on X86_32 && !CC_STACKPROTECTOR
+	depends on X86_32 && CC_STACKPROTECTOR_NONE
 
 config ARCH_SUPPORTS_UPROBES
 	def_bool y
@@ -4,6 +4,7 @@
 
 #include <linux/const.h>
 #define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+#define KASAN_SHADOW_SCALE_SHIFT 3
 
 /*
  * Compiler uses shadow offset assuming that addresses start
@@ -12,12 +13,15 @@
  * 'kernel address space start' >> KASAN_SHADOW_SCALE_SHIFT
 */
 #define KASAN_SHADOW_START	(KASAN_SHADOW_OFFSET + \
-					((-1UL << __VIRTUAL_MASK_SHIFT) >> 3))
+					((-1UL << __VIRTUAL_MASK_SHIFT) >> \
+						KASAN_SHADOW_SCALE_SHIFT))
 /*
- * 47 bits for kernel address -> (47 - 3) bits for shadow
- * 56 bits for kernel address -> (56 - 3) bits for shadow
+ * 47 bits for kernel address -> (47 - KASAN_SHADOW_SCALE_SHIFT) bits for shadow
+ * 56 bits for kernel address -> (56 - KASAN_SHADOW_SCALE_SHIFT) bits for shadow
 */
-#define KASAN_SHADOW_END	(KASAN_SHADOW_START + (1ULL << (__VIRTUAL_MASK_SHIFT - 3)))
+#define KASAN_SHADOW_END	(KASAN_SHADOW_START + \
+					(1ULL << (__VIRTUAL_MASK_SHIFT - \
+						  KASAN_SHADOW_SCALE_SHIFT)))
 
 #ifndef __ASSEMBLY__
 

@@ -658,10 +658,8 @@ static void dsu_pmu_probe_pmu(struct dsu_pmu *dsu_pmu)
 		return;
 	cpmceid[0] = __dsu_pmu_read_pmceid(0);
 	cpmceid[1] = __dsu_pmu_read_pmceid(1);
-	bitmap_from_u32array(dsu_pmu->cpmceid_bitmap,
-			     DSU_PMU_MAX_COMMON_EVENTS,
-			     cpmceid,
-			     ARRAY_SIZE(cpmceid));
+	bitmap_from_arr32(dsu_pmu->cpmceid_bitmap, cpmceid,
+			  DSU_PMU_MAX_COMMON_EVENTS);
 }
 
 static void dsu_pmu_set_active_cpu(int cpu, struct dsu_pmu *dsu_pmu)
@@ -70,7 +70,7 @@ static long hrtimer_error = SAFETY_INTERVAL;
 /* the kernel hrtimer event */
 static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
 {
-	struct timespec expire_time, ts1, ts2, ts3, dts;
+	struct timespec64 expire_time, ts1, ts2, ts3, dts;
 	struct pps_generator_pp *dev;
 	struct parport *port;
 	long lim, delta;
@@ -78,7 +78,7 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
 
 	/* We have to disable interrupts here. The idea is to prevent
 	 * other interrupts on the same processor to introduce random
-	 * lags while polling the clock. getnstimeofday() takes <1us on
+	 * lags while polling the clock. ktime_get_real_ts64() takes <1us on
 	 * most machines while other interrupt handlers can take much
 	 * more potentially.
 	 *
@@ -88,22 +88,22 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
 	local_irq_save(flags);
 
 	/* first of all we get the time stamp... */
-	getnstimeofday(&ts1);
-	expire_time = ktime_to_timespec(hrtimer_get_softexpires(timer));
+	ktime_get_real_ts64(&ts1);
+	expire_time = ktime_to_timespec64(hrtimer_get_softexpires(timer));
 	dev = container_of(timer, struct pps_generator_pp, timer);
 	lim = NSEC_PER_SEC - send_delay - dev->port_write_time;
 
 	/* check if we are late */
 	if (expire_time.tv_sec != ts1.tv_sec || ts1.tv_nsec > lim) {
 		local_irq_restore(flags);
-		pr_err("we are late this time %ld.%09ld\n",
-				ts1.tv_sec, ts1.tv_nsec);
+		pr_err("we are late this time %lld.%09ld\n",
+				(s64)ts1.tv_sec, ts1.tv_nsec);
 		goto done;
 	}
 
 	/* busy loop until the time is right for an assert edge */
 	do {
-		getnstimeofday(&ts2);
+		ktime_get_real_ts64(&ts2);
 	} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
 
 	/* set the signal */
@@ -113,25 +113,25 @@ static enum hrtimer_restart hrtimer_event(struct hrtimer *timer)
 	/* busy loop until the time is right for a clear edge */
 	lim = NSEC_PER_SEC - dev->port_write_time;
 	do {
-		getnstimeofday(&ts2);
+		ktime_get_real_ts64(&ts2);
 	} while (expire_time.tv_sec == ts2.tv_sec && ts2.tv_nsec < lim);
 
 	/* unset the signal */
 	port->ops->write_control(port, NO_SIGNAL);
 
-	getnstimeofday(&ts3);
+	ktime_get_real_ts64(&ts3);
 
 	local_irq_restore(flags);
 
 	/* update calibrated port write time */
-	dts = timespec_sub(ts3, ts2);
+	dts = timespec64_sub(ts3, ts2);
 	dev->port_write_time =
-		(dev->port_write_time + timespec_to_ns(&dts)) >> 1;
+		(dev->port_write_time + timespec64_to_ns(&dts)) >> 1;
 
 done:
 	/* update calibrated hrtimer error */
-	dts = timespec_sub(ts1, expire_time);
-	delta = timespec_to_ns(&dts);
+	dts = timespec64_sub(ts1, expire_time);
+	delta = timespec64_to_ns(&dts);
 	/* If the new error value is bigger then the old, use the new
 	 * value, if not then slowly move towards the new value. This
 	 * way it should be safe in bad conditions and efficient in
@@ -161,17 +161,17 @@ static void calibrate_port(struct pps_generator_pp *dev)
 	long acc = 0;
 
 	for (i = 0; i < (1 << PORT_NTESTS_SHIFT); i++) {
-		struct timespec a, b;
+		struct timespec64 a, b;
 		unsigned long irq_flags;
 
 		local_irq_save(irq_flags);
-		getnstimeofday(&a);
+		ktime_get_real_ts64(&a);
 		port->ops->write_control(port, NO_SIGNAL);
-		getnstimeofday(&b);
+		ktime_get_real_ts64(&b);
 		local_irq_restore(irq_flags);
 
-		b = timespec_sub(b, a);
-		acc += timespec_to_ns(&b);
+		b = timespec64_sub(b, a);
+		acc += timespec64_to_ns(&b);
 	}
 
 	dev->port_write_time = acc >> PORT_NTESTS_SHIFT;
@@ -180,9 +180,9 @@ static void calibrate_port(struct pps_generator_pp *dev)
 
 static inline ktime_t next_intr_time(struct pps_generator_pp *dev)
 {
-	struct timespec ts;
+	struct timespec64 ts;
 
-	getnstimeofday(&ts);
+	ktime_get_real_ts64(&ts);
 
 	return ktime_set(ts.tv_sec +
 			((ts.tv_nsec > 990 * NSEC_PER_MSEC) ? 1 : 0),
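These conversions are y2038 preparation: struct timespec64 keeps tv_sec as a 64-bit time64_t even on 32-bit machines, and ktime_get_real_ts64() replaces the deprecated getnstimeofday(). Because tv_sec is no longer a plain long on every architecture, printing it needs %lld plus an explicit cast, as in the pr_err() change above. A minimal fragment of the new pattern (illustrative, kernel context assumed):

	struct timespec64 ts;

	ktime_get_real_ts64(&ts);	/* 64-bit seconds on all arches */
	pr_info("now: %lld.%09ld\n", (s64)ts.tv_sec, ts.tv_nsec);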
@@ -222,7 +222,7 @@ static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
 	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
 #endif
 
-	if (bdma_chan->bd_base == NULL)
+	if (!bdma_chan->bd_base)
 		return 0;
 
 	/* Check if DMA channel still running */
@@ -346,7 +346,7 @@ tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
 {
 	u64 rio_addr;
 
-	if (bd_ptr == NULL)
+	if (!bd_ptr)
 		return -EINVAL;
 
 	/* Initialize DMA descriptor */
@@ -370,7 +370,7 @@ tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
 static int
 tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
 {
-	if (bd_ptr == NULL)
+	if (!bd_ptr)
 		return -EINVAL;
 
 	/* Update DMA descriptor */
@@ -555,9 +555,7 @@ static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
 	 * If there is no data transfer in progress, fetch new descriptor from
 	 * the pending queue.
 	*/
-
-	if (desc == NULL && bdma_chan->active_tx == NULL &&
-	    !list_empty(&bdma_chan->queue)) {
+	if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
 		desc = list_first_entry(&bdma_chan->queue,
 					struct tsi721_tx_desc, desc_node);
 		list_del_init((&desc->desc_node));
@@ -735,7 +733,7 @@ static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
 static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
 {
 	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-	struct tsi721_tx_desc *desc = NULL;
+	struct tsi721_tx_desc *desc;
 	int i;
 
 	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
@@ -754,9 +752,6 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
 	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
 			GFP_ATOMIC);
 	if (!desc) {
-		tsi_err(&dchan->dev->device,
-			"DMAC%d Failed to allocate logical descriptors",
-			bdma_chan->id);
 		tsi721_bdma_ch_free(bdma_chan);
 		return -ENOMEM;
 	}
@@ -799,7 +794,7 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
 
 	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);
 
-	if (bdma_chan->bd_base == NULL)
+	if (!bdma_chan->bd_base)
 		return;
 
 	tsi721_bdma_interrupt_enable(bdma_chan, 0);
@@ -81,6 +81,7 @@ u16 rio_local_get_device_id(struct rio_mport *port)
 
 	return (RIO_GET_DID(port->sys_size, result));
 }
+EXPORT_SYMBOL_GPL(rio_local_get_device_id);
 
 /**
  * rio_query_mport - Query mport device attributes
@@ -110,9 +111,8 @@ EXPORT_SYMBOL(rio_query_mport);
 */
 struct rio_net *rio_alloc_net(struct rio_mport *mport)
 {
-	struct rio_net *net;
+	struct rio_net *net = kzalloc(sizeof(*net), GFP_KERNEL);
 
-	net = kzalloc(sizeof(struct rio_net), GFP_KERNEL);
 	if (net) {
 		INIT_LIST_HEAD(&net->node);
 		INIT_LIST_HEAD(&net->devices);
@@ -243,18 +243,17 @@ int rio_request_inb_mbox(struct rio_mport *mport,
 	int rc = -ENOSYS;
 	struct resource *res;
 
-	if (mport->ops->open_inb_mbox == NULL)
+	if (!mport->ops->open_inb_mbox)
 		goto out;
 
-	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
-
+	res = kzalloc(sizeof(*res), GFP_KERNEL);
 	if (res) {
 		rio_init_mbox_res(res, mbox, mbox);
 
 		/* Make sure this mailbox isn't in use */
-		if ((rc =
-			request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE],
-					 res)) < 0) {
+		rc = request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE],
+				      res);
+		if (rc < 0) {
 			kfree(res);
 			goto out;
 		}
@@ -277,6 +276,7 @@ int rio_request_inb_mbox(struct rio_mport *mport,
       out:
 	return rc;
 }
+EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
 
 /**
  * rio_release_inb_mbox - release inbound mailbox message service
@@ -305,6 +305,7 @@ int rio_release_inb_mbox(struct rio_mport *mport, int mbox)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
 
 /**
  * rio_request_outb_mbox - request outbound mailbox service
@@ -326,18 +327,17 @@ int rio_request_outb_mbox(struct rio_mport *mport,
 	int rc = -ENOSYS;
 	struct resource *res;
 
-	if (mport->ops->open_outb_mbox == NULL)
+	if (!mport->ops->open_outb_mbox)
 		goto out;
 
-	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
-
+	res = kzalloc(sizeof(*res), GFP_KERNEL);
 	if (res) {
 		rio_init_mbox_res(res, mbox, mbox);
 
 		/* Make sure this outbound mailbox isn't in use */
-		if ((rc =
-			request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE],
-					 res)) < 0) {
+		rc = request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE],
+				      res);
+		if (rc < 0) {
 			kfree(res);
 			goto out;
 		}
@@ -360,6 +360,7 @@ int rio_request_outb_mbox(struct rio_mport *mport,
       out:
 	return rc;
 }
+EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
 
 /**
  * rio_release_outb_mbox - release outbound mailbox message service
@@ -388,6 +389,7 @@ int rio_release_outb_mbox(struct rio_mport *mport, int mbox)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(rio_release_outb_mbox);
 
 /**
  * rio_setup_inb_dbell - bind inbound doorbell callback
@@ -405,13 +407,10 @@ rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res,
 		    void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, u16 dst,
 				  u16 info))
 {
-	int rc = 0;
-	struct rio_dbell *dbell;
+	struct rio_dbell *dbell = kmalloc(sizeof(*dbell), GFP_KERNEL);
 
-	if (!(dbell = kmalloc(sizeof(struct rio_dbell), GFP_KERNEL))) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!dbell)
+		return -ENOMEM;
 
 	dbell->res = res;
 	dbell->dinb = dinb;
@@ -420,9 +419,7 @@ rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res,
 	mutex_lock(&mport->lock);
 	list_add_tail(&dbell->node, &mport->dbells);
 	mutex_unlock(&mport->lock);
-
-      out:
-	return rc;
+	return 0;
 }
 
 /**
@@ -444,17 +441,16 @@ int rio_request_inb_dbell(struct rio_mport *mport,
 		       void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src,
 				     u16 dst, u16 info))
 {
-	int rc = 0;
-
-	struct resource *res = kzalloc(sizeof(struct resource), GFP_KERNEL);
+	int rc;
+	struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
 
 	if (res) {
 		rio_init_dbell_res(res, start, end);
 
 		/* Make sure these doorbells aren't in use */
-		if ((rc =
-			request_resource(&mport->riores[RIO_DOORBELL_RESOURCE],
-					 res)) < 0) {
+		rc = request_resource(&mport->riores[RIO_DOORBELL_RESOURCE],
+				      res);
+		if (rc < 0) {
 			kfree(res);
 			goto out;
 		}
@@ -467,6 +463,7 @@ int rio_request_inb_dbell(struct rio_mport *mport,
       out:
 	return rc;
 }
+EXPORT_SYMBOL_GPL(rio_request_inb_dbell);
 
 /**
  * rio_release_inb_dbell - release inbound doorbell message service
@@ -508,6 +505,7 @@ int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end)
       out:
 	return rc;
 }
+EXPORT_SYMBOL_GPL(rio_release_inb_dbell);
 
 /**
  * rio_request_outb_dbell - request outbound doorbell message range
@@ -536,6 +534,7 @@ struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start,
 
 	return res;
 }
+EXPORT_SYMBOL_GPL(rio_request_outb_dbell);
 
 /**
  * rio_release_outb_dbell - release outbound doorbell message range
@@ -553,6 +552,7 @@ int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res)
 
 	return rc;
 }
+EXPORT_SYMBOL_GPL(rio_release_outb_dbell);
 
 /**
  * rio_add_mport_pw_handler - add port-write message handler into the list
@@ -567,22 +567,17 @@ int rio_add_mport_pw_handler(struct rio_mport *mport, void *context,
 			     int (*pwcback)(struct rio_mport *mport,
 			     void *context, union rio_pw_msg *msg, int step))
 {
-	int rc = 0;
-	struct rio_pwrite *pwrite;
+	struct rio_pwrite *pwrite = kzalloc(sizeof(*pwrite), GFP_KERNEL);
 
-	pwrite = kzalloc(sizeof(struct rio_pwrite), GFP_KERNEL);
-	if (!pwrite) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!pwrite)
+		return -ENOMEM;
 
 	pwrite->pwcback = pwcback;
 	pwrite->context = context;
 	mutex_lock(&mport->lock);
 	list_add_tail(&pwrite->node, &mport->pwrites);
 	mutex_unlock(&mport->lock);
-      out:
-	return rc;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(rio_add_mport_pw_handler);
 
@@ -632,7 +627,7 @@ int rio_request_inb_pwrite(struct rio_dev *rdev,
 	int rc = 0;
 
 	spin_lock(&rio_global_list_lock);
-	if (rdev->pwcback != NULL)
+	if (rdev->pwcback)
 		rc = -ENOMEM;
 	else
 		rdev->pwcback = pwcback;
@@ -698,7 +693,7 @@ EXPORT_SYMBOL_GPL(rio_pw_enable);
 int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local,
 			u64 rbase, u32 size, u32 rflags)
 {
-	int rc = 0;
+	int rc;
 	unsigned long flags;
 
 	if (!mport->ops->map_inb)
@@ -742,7 +737,7 @@ EXPORT_SYMBOL_GPL(rio_unmap_inb_region);
 int rio_map_outb_region(struct rio_mport *mport, u16 destid, u64 rbase,
 			u32 size, u32 rflags, dma_addr_t *local)
 {
-	int rc = 0;
+	int rc;
 	unsigned long flags;
 
 	if (!mport->ops->map_outb)
@@ -975,7 +970,7 @@ rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum)
 		rdev = rdev->prev;
 	}
 
-	if (prev == NULL)
+	if (!prev)
 		goto err_out;
 
 	p_port = prev->rswitch->route_table[rdev->destid];
@@ -1054,7 +1049,7 @@ rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp)
 			RIO_MNT_REQ_CMD_IS);
 
 	/* Exit if the response is not expected */
-	if (lnkresp == NULL)
+	if (!lnkresp)
 		return 0;
 
 	checkcount = 3;
@@ -1411,7 +1406,9 @@ rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
 						ext_ftr_ptr, &ftr_header);
 		if (RIO_GET_BLOCK_ID(ftr_header) == ftr)
 			return ext_ftr_ptr;
-		if (!(ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header)))
+
+		ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header);
+		if (!ext_ftr_ptr)
 			break;
 	}
 
@@ -1462,6 +1459,7 @@ struct rio_dev *rio_get_asm(u16 vid, u16 did,
 	spin_unlock(&rio_global_list_lock);
 	return rdev;
 }
+EXPORT_SYMBOL_GPL(rio_get_asm);
 
 /**
  * rio_get_device - Begin or continue searching for a RIO device by vid/did
@@ -1481,6 +1479,7 @@ struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
 {
 	return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
 }
+EXPORT_SYMBOL_GPL(rio_get_device);
 
 /**
  * rio_std_route_add_entry - Add switch route table entry using standard
@@ -1696,7 +1695,7 @@ int rio_route_add_entry(struct rio_dev *rdev,
 
 	spin_lock(&rdev->rswitch->lock);
 
-	if (ops == NULL || ops->add_entry == NULL) {
+	if (!ops || !ops->add_entry) {
 		rc = rio_std_route_add_entry(rdev->net->hport, rdev->destid,
 					     rdev->hopcount, table,
 					     route_destid, route_port);
@@ -1749,7 +1748,7 @@ int rio_route_get_entry(struct rio_dev *rdev, u16 table,
 
 	spin_lock(&rdev->rswitch->lock);
 
-	if (ops == NULL || ops->get_entry == NULL) {
+	if (!ops || !ops->get_entry) {
 		rc = rio_std_route_get_entry(rdev->net->hport, rdev->destid,
 					     rdev->hopcount, table,
 					     route_destid, route_port);
@@ -1797,7 +1796,7 @@ int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock)
 
 	spin_lock(&rdev->rswitch->lock);
 
-	if (ops == NULL || ops->clr_table == NULL) {
+	if (!ops || !ops->clr_table) {
 		rc = rio_std_route_clr_table(rdev->net->hport, rdev->destid,
 					     rdev->hopcount, table);
 	} else if (try_module_get(ops->owner)) {
@@ -1889,7 +1888,7 @@ struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan,
 {
 	struct rio_dma_ext rio_ext;
 
-	if (dchan->device->device_prep_slave_sg == NULL) {
+	if (!dchan->device->device_prep_slave_sg) {
 		pr_err("%s: prep_rio_sg == NULL\n", __func__);
 		return NULL;
 	}
@@ -2189,7 +2188,6 @@ int rio_init_mports(void)
 
 	work = kcalloc(n, sizeof *work, GFP_KERNEL);
 	if (!work) {
-		pr_err("RIO: no memory for work struct\n");
 		destroy_workqueue(rio_wq);
 		goto no_disc;
 	}
@@ -2216,6 +2214,7 @@ no_disc:
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(rio_init_mports);
 
 static int rio_get_hdid(int index)
 {
@@ -2330,16 +2329,3 @@ int rio_unregister_mport(struct rio_mport *port)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(rio_unregister_mport);
-
-EXPORT_SYMBOL_GPL(rio_local_get_device_id);
-EXPORT_SYMBOL_GPL(rio_get_device);
-EXPORT_SYMBOL_GPL(rio_get_asm);
-EXPORT_SYMBOL_GPL(rio_request_inb_dbell);
-EXPORT_SYMBOL_GPL(rio_release_inb_dbell);
-EXPORT_SYMBOL_GPL(rio_request_outb_dbell);
-EXPORT_SYMBOL_GPL(rio_release_outb_dbell);
-EXPORT_SYMBOL_GPL(rio_request_inb_mbox);
-EXPORT_SYMBOL_GPL(rio_release_inb_mbox);
-EXPORT_SYMBOL_GPL(rio_request_outb_mbox);
-EXPORT_SYMBOL_GPL(rio_release_outb_mbox);
-EXPORT_SYMBOL_GPL(rio_init_mports);
@@ -1599,6 +1599,8 @@ static int fill_files_note(struct memelfnote *note)
 
 	/* *Estimated* file count and total data size needed */
 	count = current->mm->map_count;
+	if (count > UINT_MAX / 64)
+		return -EINVAL;
 	size = count * 64;
 
 	names_ofs = (2 + 3 * count) * sizeof(data[0]);
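The added guard bounds count before the multiplication: since size is computed as count * 64, any count above UINT_MAX / 64 would wrap. Stated as the general idiom (illustrative, not part of the patch):

	/* Reject n before computing n * factor in a fixed-width type:
	 * n <= MAX / factor  implies  n * factor <= MAX, so the product
	 * provably cannot overflow.
	 */
	if (count > UINT_MAX / 64)
		return -EINVAL;
	size = count * 64;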
@@ -444,7 +444,7 @@ static int hfsplus_symlink(struct inode *dir, struct dentry *dentry,
 	int res = -ENOMEM;
 
 	mutex_lock(&sbi->vh_mutex);
-	inode = hfsplus_new_inode(dir->i_sb, S_IFLNK | S_IRWXUGO);
+	inode = hfsplus_new_inode(dir->i_sb, dir, S_IFLNK | S_IRWXUGO);
 	if (!inode)
 		goto out;
 
@@ -486,7 +486,7 @@ static int hfsplus_mknod(struct inode *dir, struct dentry *dentry,
 	int res = -ENOMEM;
 
 	mutex_lock(&sbi->vh_mutex);
-	inode = hfsplus_new_inode(dir->i_sb, mode);
+	inode = hfsplus_new_inode(dir->i_sb, dir, mode);
 	if (!inode)
 		goto out;
 

@@ -478,7 +478,8 @@ extern const struct address_space_operations hfsplus_aops;
 extern const struct address_space_operations hfsplus_btree_aops;
 extern const struct dentry_operations hfsplus_dentry_operations;
 
-struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode);
+struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
+				umode_t mode);
 void hfsplus_delete_inode(struct inode *inode);
 void hfsplus_inode_read_fork(struct inode *inode,
 			     struct hfsplus_fork_raw *fork);

@@ -354,7 +354,8 @@ static const struct file_operations hfsplus_file_operations = {
 	.unlocked_ioctl = hfsplus_ioctl,
 };
 
-struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
+struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
+				umode_t mode)
 {
 	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
 	struct inode *inode = new_inode(sb);
@@ -364,9 +365,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
 		return NULL;
 
 	inode->i_ino = sbi->next_cnid++;
-	inode->i_mode = mode;
-	inode->i_uid = current_fsuid();
-	inode->i_gid = current_fsgid();
+	inode_init_owner(inode, dir, mode);
 	set_nlink(inode, 1);
 	inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);
 

@@ -549,7 +549,7 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
 
 	if (!sbi->hidden_dir) {
 		mutex_lock(&sbi->vh_mutex);
-		sbi->hidden_dir = hfsplus_new_inode(sb, S_IFDIR);
+		sbi->hidden_dir = hfsplus_new_inode(sb, root, S_IFDIR);
 		if (!sbi->hidden_dir) {
 			mutex_unlock(&sbi->vh_mutex);
 			err = -ENOMEM;

@@ -498,7 +498,6 @@ EXPORT_SYMBOL(__remove_inode_hash);
 
 void clear_inode(struct inode *inode)
 {
-	might_sleep();
 	/*
 	 * We have to cycle tree_lock here because reclaim can be still in the
 	 * process of removing the last page (in __delete_from_page_cache())
@@ -130,7 +130,7 @@ int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *segbuf,
 }
 
 int nilfs_segbuf_reset(struct nilfs_segment_buffer *segbuf, unsigned int flags,
-		       time_t ctime, __u64 cno)
+		       time64_t ctime, __u64 cno)
 {
 	int err;
 

@@ -46,7 +46,7 @@ struct nilfs_segsum_info {
 	unsigned long		nfileblk;
 	u64			seg_seq;
 	__u64			cno;
-	time_t			ctime;
+	time64_t		ctime;
 	sector_t		next;
 };
 
@@ -120,7 +120,7 @@ void nilfs_segbuf_map_cont(struct nilfs_segment_buffer *segbuf,
 			   struct nilfs_segment_buffer *prev);
 void nilfs_segbuf_set_next_segnum(struct nilfs_segment_buffer *, __u64,
 				  struct the_nilfs *);
-int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned int, time_t,
+int nilfs_segbuf_reset(struct nilfs_segment_buffer *, unsigned int, time64_t,
 		       __u64);
 int nilfs_segbuf_extend_segsum(struct nilfs_segment_buffer *);
 int nilfs_segbuf_extend_payload(struct nilfs_segment_buffer *,

@@ -2040,7 +2040,7 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 		goto out;
 
 	/* Update time stamp */
-	sci->sc_seg_ctime = get_seconds();
+	sci->sc_seg_ctime = ktime_get_real_seconds();
 
 	err = nilfs_segctor_collect(sci, nilfs, mode);
 	if (unlikely(err))

@@ -157,7 +157,7 @@ struct nilfs_sc_info {
 	unsigned long	sc_blk_cnt;
 	unsigned long	sc_datablk_cnt;
 	unsigned long	sc_nblk_this_inc;
-	time_t		sc_seg_ctime;
+	time64_t	sc_seg_ctime;
 	__u64		sc_cno;
 	unsigned long	sc_flags;
 

@@ -526,7 +526,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
 * @modtime: modification time (option)
 */
 int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
-				   unsigned long nblocks, time_t modtime)
+				   unsigned long nblocks, time64_t modtime)
 {
 	struct buffer_head *bh;
 	struct nilfs_segment_usage *su;

@@ -35,7 +35,7 @@ int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end);
 int nilfs_sufile_alloc(struct inode *, __u64 *);
 int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum);
 int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
-				   unsigned long nblocks, time_t modtime);
+				   unsigned long nblocks, time64_t modtime);
 int nilfs_sufile_get_stat(struct inode *, struct nilfs_sustat *);
 ssize_t nilfs_sufile_get_suinfo(struct inode *, __u64, void *, unsigned int,
 				size_t);

@@ -283,10 +283,10 @@ int nilfs_commit_super(struct super_block *sb, int flag)
 {
 	struct the_nilfs *nilfs = sb->s_fs_info;
 	struct nilfs_super_block **sbp = nilfs->ns_sbp;
-	time_t t;
+	time64_t t;
 
 	/* nilfs->ns_sem must be locked by the caller. */
-	t = get_seconds();
+	t = ktime_get_real_seconds();
 	nilfs->ns_sbwtime = t;
 	sbp[0]->s_wtime = cpu_to_le64(t);
 	sbp[0]->s_sum = 0;

@@ -31,7 +31,7 @@ static struct kset *nilfs_kset;
 #define NILFS_SHOW_TIME(time_t_val, buf) ({ \
 		struct tm res; \
 		int count = 0; \
-		time_to_tm(time_t_val, 0, &res); \
+		time64_to_tm(time_t_val, 0, &res); \
 		res.tm_year += 1900; \
 		res.tm_mon += 1; \
 		count = scnprintf(buf, PAGE_SIZE, \
@@ -579,7 +579,7 @@ nilfs_segctor_last_seg_write_time_show(struct nilfs_segctor_attr *attr,
 					struct the_nilfs *nilfs,
 					char *buf)
 {
-	time_t ctime;
+	time64_t ctime;
 
 	down_read(&nilfs->ns_segctor_sem);
 	ctime = nilfs->ns_ctime;
@@ -593,13 +593,13 @@ nilfs_segctor_last_seg_write_time_secs_show(struct nilfs_segctor_attr *attr,
 					    struct the_nilfs *nilfs,
 					    char *buf)
 {
-	time_t ctime;
+	time64_t ctime;
 
 	down_read(&nilfs->ns_segctor_sem);
 	ctime = nilfs->ns_ctime;
 	up_read(&nilfs->ns_segctor_sem);
 
-	return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)ctime);
+	return snprintf(buf, PAGE_SIZE, "%llu\n", ctime);
 }
 
 static ssize_t
@@ -607,7 +607,7 @@ nilfs_segctor_last_nongc_write_time_show(struct nilfs_segctor_attr *attr,
 					 struct the_nilfs *nilfs,
 					 char *buf)
 {
-	time_t nongc_ctime;
+	time64_t nongc_ctime;
 
 	down_read(&nilfs->ns_segctor_sem);
 	nongc_ctime = nilfs->ns_nongc_ctime;
@@ -621,14 +621,13 @@ nilfs_segctor_last_nongc_write_time_secs_show(struct nilfs_segctor_attr *attr,
 					      struct the_nilfs *nilfs,
 					      char *buf)
 {
-	time_t nongc_ctime;
+	time64_t nongc_ctime;
 
 	down_read(&nilfs->ns_segctor_sem);
 	nongc_ctime = nilfs->ns_nongc_ctime;
 	up_read(&nilfs->ns_segctor_sem);
 
-	return snprintf(buf, PAGE_SIZE, "%llu\n",
-			(unsigned long long)nongc_ctime);
+	return snprintf(buf, PAGE_SIZE, "%llu\n", nongc_ctime);
 }
 
 static ssize_t
@@ -728,7 +727,7 @@ nilfs_superblock_sb_write_time_show(struct nilfs_superblock_attr *attr,
 				     struct the_nilfs *nilfs,
 				     char *buf)
 {
-	time_t sbwtime;
+	time64_t sbwtime;
 
 	down_read(&nilfs->ns_sem);
 	sbwtime = nilfs->ns_sbwtime;
@@ -742,13 +741,13 @@ nilfs_superblock_sb_write_time_secs_show(struct nilfs_superblock_attr *attr,
 					 struct the_nilfs *nilfs,
 					 char *buf)
 {
-	time_t sbwtime;
+	time64_t sbwtime;
 
 	down_read(&nilfs->ns_sem);
 	sbwtime = nilfs->ns_sbwtime;
 	up_read(&nilfs->ns_sem);
 
-	return snprintf(buf, PAGE_SIZE, "%llu\n", (unsigned long long)sbwtime);
+	return snprintf(buf, PAGE_SIZE, "%llu\n", sbwtime);
 }
 
 static ssize_t

@@ -116,7 +116,7 @@ struct the_nilfs {
 	 */
 	struct buffer_head     *ns_sbh[2];
 	struct nilfs_super_block *ns_sbp[2];
-	time_t			ns_sbwtime;
+	time64_t		ns_sbwtime;
 	unsigned int		ns_sbwcount;
 	unsigned int		ns_sbsize;
 	unsigned int		ns_mount_state;
@@ -131,8 +131,8 @@ struct the_nilfs {
 	__u64			ns_nextnum;
 	unsigned long		ns_pseg_offset;
 	__u64			ns_cno;
-	time_t			ns_ctime;
-	time_t			ns_nongc_ctime;
+	time64_t		ns_ctime;
+	time64_t		ns_nongc_ctime;
 	atomic_t		ns_ndirtyblks;
 
 	/*
@@ -267,7 +267,7 @@ struct nilfs_root {
 
 static inline int nilfs_sb_need_update(struct the_nilfs *nilfs)
 {
-	u64 t = get_seconds();
+	u64 t = ktime_get_real_seconds();
 
 	return t < nilfs->ns_sbwtime ||
 		t > nilfs->ns_sbwtime + nilfs->ns_sb_update_freq;
fs/pipe.c | 57
@@ -35,11 +35,6 @@
 */
 unsigned int pipe_max_size = 1048576;
 
-/*
- * Minimum pipe size, as required by POSIX
- */
-unsigned int pipe_min_size = PAGE_SIZE;
-
 /* Maximum allocatable pages per user. Hard limit is unset by default, soft
 * matches default values.
 */
@@ -610,12 +605,21 @@ static unsigned long account_pipe_buffers(struct user_struct *user,
 
 static bool too_many_pipe_buffers_soft(unsigned long user_bufs)
 {
-	return pipe_user_pages_soft && user_bufs >= pipe_user_pages_soft;
+	unsigned long soft_limit = READ_ONCE(pipe_user_pages_soft);
+
+	return soft_limit && user_bufs > soft_limit;
 }
 
 static bool too_many_pipe_buffers_hard(unsigned long user_bufs)
 {
-	return pipe_user_pages_hard && user_bufs >= pipe_user_pages_hard;
+	unsigned long hard_limit = READ_ONCE(pipe_user_pages_hard);
+
+	return hard_limit && user_bufs > hard_limit;
 }
 
+static bool is_unprivileged_user(void)
+{
+	return !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN);
+}
+
 struct pipe_inode_info *alloc_pipe_info(void)
@@ -624,22 +628,23 @@ struct pipe_inode_info *alloc_pipe_info(void)
 	unsigned long pipe_bufs = PIPE_DEF_BUFFERS;
 	struct user_struct *user = get_current_user();
 	unsigned long user_bufs;
+	unsigned int max_size = READ_ONCE(pipe_max_size);
 
 	pipe = kzalloc(sizeof(struct pipe_inode_info), GFP_KERNEL_ACCOUNT);
 	if (pipe == NULL)
 		goto out_free_uid;
 
-	if (pipe_bufs * PAGE_SIZE > pipe_max_size && !capable(CAP_SYS_RESOURCE))
-		pipe_bufs = pipe_max_size >> PAGE_SHIFT;
+	if (pipe_bufs * PAGE_SIZE > max_size && !capable(CAP_SYS_RESOURCE))
+		pipe_bufs = max_size >> PAGE_SHIFT;
 
 	user_bufs = account_pipe_buffers(user, 0, pipe_bufs);
 
-	if (too_many_pipe_buffers_soft(user_bufs)) {
+	if (too_many_pipe_buffers_soft(user_bufs) && is_unprivileged_user()) {
 		user_bufs = account_pipe_buffers(user, pipe_bufs, 1);
 		pipe_bufs = 1;
 	}
 
-	if (too_many_pipe_buffers_hard(user_bufs))
+	if (too_many_pipe_buffers_hard(user_bufs) && is_unprivileged_user())
 		goto out_revert_acct;
 
 	pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
@@ -1020,18 +1025,16 @@ const struct file_operations pipefifo_fops = {
 * Currently we rely on the pipe array holding a power-of-2 number
 * of pages. Returns 0 on error.
 */
-unsigned int round_pipe_size(unsigned int size)
+unsigned int round_pipe_size(unsigned long size)
 {
-	unsigned long nr_pages;
-
-	if (size < pipe_min_size)
-		size = pipe_min_size;
-
-	nr_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	if (nr_pages == 0)
+	if (size > (1U << 31))
 		return 0;
 
-	return roundup_pow_of_two(nr_pages) << PAGE_SHIFT;
+	/* Minimum pipe size, as required by POSIX */
+	if (size < PAGE_SIZE)
+		return PAGE_SIZE;
+
+	return roundup_pow_of_two(size);
 }
 
 /*
@@ -1046,8 +1049,6 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
 	long ret = 0;
 
 	size = round_pipe_size(arg);
-	if (size == 0)
-		return -EINVAL;
 	nr_pages = size >> PAGE_SHIFT;
 
 	if (!nr_pages)
@@ -1069,7 +1070,7 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
 	if (nr_pages > pipe->buffers &&
 			(too_many_pipe_buffers_hard(user_bufs) ||
 			 too_many_pipe_buffers_soft(user_bufs)) &&
-			!capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN)) {
+			is_unprivileged_user()) {
 		ret = -EPERM;
 		goto out_revert_acct;
 	}
@@ -1124,16 +1125,6 @@ out_revert_acct:
 	return ret;
 }
 
-/*
- * This should work even if CONFIG_PROC_FS isn't set, as proc_dopipe_max_size
- * will return an error.
- */
-int pipe_proc_fn(struct ctl_table *table, int write, void __user *buf,
-		 size_t *lenp, loff_t *ppos)
-{
-	return proc_dopipe_max_size(table, write, buf, lenp, ppos);
-}
-
 /*
 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
 * location, so checking ->i_pipe is not enough to verify that this is a
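The simplified round_pipe_size() takes an unsigned long so an oversized F_SETPIPE_SZ argument can be rejected up front instead of being silently truncated: sizes above 2^31 return 0, sub-page sizes clamp to PAGE_SIZE (the POSIX minimum), and everything else rounds up to a power of two. A self-contained sketch of those semantics — a user-space approximation assuming 4 KiB pages, with roundup_pow_of_two() open-coded:

	#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

	static unsigned int round_pipe_size_sketch(unsigned long size)
	{
		if (size > (1U << 31))
			return 0;		/* reject oversized requests */
		if (size < PAGE_SIZE)
			return PAGE_SIZE;	/* POSIX minimum */
		/* round up to the next power of two (bit-smearing trick;
		 * valid here because size <= 2^31 after the checks above) */
		size--;
		size |= size >> 1;  size |= size >> 2;  size |= size >> 4;
		size |= size >> 8;  size |= size >> 16;
		return size + 1;
	}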
@@ -736,16 +736,10 @@ static int children_seq_open(struct inode *inode, struct file *file)
 	return ret;
 }
 
-int children_seq_release(struct inode *inode, struct file *file)
-{
-	seq_release(inode, file);
-	return 0;
-}
-
 const struct file_operations proc_tid_children_operations = {
 	.open		= children_seq_open,
 	.read		= seq_read,
 	.llseek		= seq_lseek,
-	.release	= children_seq_release,
+	.release	= seq_release,
 };
 #endif /* CONFIG_PROC_CHILDREN */
@@ -75,6 +75,7 @@
 #include <linux/ptrace.h>
 #include <linux/tracehook.h>
 #include <linux/printk.h>
+#include <linux/cache.h>
 #include <linux/cgroup.h>
 #include <linux/cpuset.h>
 #include <linux/audit.h>
@@ -100,6 +101,8 @@
 #include "internal.h"
 #include "fd.h"
 
+#include "../../lib/kstrtox.h"
+
 /* NOTE:
 *	Implementing inode permission operations in /proc is almost
 *	certainly an error. Permission checks need to happen during
@@ -110,8 +113,8 @@
 *	in /proc for a task before it execs a suid executable.
 */
 
-static u8 nlink_tid;
-static u8 nlink_tgid;
+static u8 nlink_tid __ro_after_init;
+static u8 nlink_tgid __ro_after_init;
 
 struct pid_entry {
 	const char *name;
@@ -1370,7 +1373,7 @@ static ssize_t proc_fail_nth_write(struct file *file, const char __user *buf,
 	task = get_proc_task(file_inode(file));
 	if (!task)
 		return -ESRCH;
-	WRITE_ONCE(task->fail_nth, n);
+	task->fail_nth = n;
 	put_task_struct(task);
 
 	return count;
@@ -1386,8 +1389,7 @@ static ssize_t proc_fail_nth_read(struct file *file, char __user *buf,
 	task = get_proc_task(file_inode(file));
 	if (!task)
 		return -ESRCH;
-	len = snprintf(numbuf, sizeof(numbuf), "%u\n",
-			READ_ONCE(task->fail_nth));
+	len = snprintf(numbuf, sizeof(numbuf), "%u\n", task->fail_nth);
 	len = simple_read_from_buffer(buf, count, ppos, numbuf, len);
 	put_task_struct(task);
 
@@ -1907,8 +1909,33 @@ end_instantiate:
 static int dname_to_vma_addr(struct dentry *dentry,
 			     unsigned long *start, unsigned long *end)
 {
-	if (sscanf(dentry->d_name.name, "%lx-%lx", start, end) != 2)
+	const char *str = dentry->d_name.name;
+	unsigned long long sval, eval;
+	unsigned int len;
+
+	len = _parse_integer(str, 16, &sval);
+	if (len & KSTRTOX_OVERFLOW)
+		return -EINVAL;
+	if (sval != (unsigned long)sval)
+		return -EINVAL;
+	str += len;
+
+	if (*str != '-')
 		return -EINVAL;
+	str++;
+
+	len = _parse_integer(str, 16, &eval);
+	if (len & KSTRTOX_OVERFLOW)
+		return -EINVAL;
+	if (eval != (unsigned long)eval)
+		return -EINVAL;
+	str += len;
+
+	if (*str != '\0')
+		return -EINVAL;
+
+	*start = sval;
+	*end = eval;
 
 	return 0;
 }
@@ -2000,9 +2027,9 @@ out:
 }
 
 struct map_files_info {
+	unsigned long	start;
+	unsigned long	end;
 	fmode_t		mode;
-	unsigned int	len;
-	unsigned char	name[4*sizeof(long)+2]; /* max: %lx-%lx\0 */
 };
 
 /*
@@ -2172,10 +2199,9 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
 		if (++pos <= ctx->pos)
 			continue;
 
+		info.start = vma->vm_start;
+		info.end = vma->vm_end;
 		info.mode = vma->vm_file->f_mode;
-		info.len = snprintf(info.name,
-				sizeof(info.name), "%lx-%lx",
-				vma->vm_start, vma->vm_end);
 		if (flex_array_put(fa, i++, &info, GFP_KERNEL))
 			BUG();
 	}
@@ -2183,9 +2209,13 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
 	up_read(&mm->mmap_sem);
 
 	for (i = 0; i < nr_files; i++) {
+		char buf[4 * sizeof(long) + 2];	/* max: %lx-%lx\0 */
+		unsigned int len;
+
 		p = flex_array_get(fa, i);
+		len = snprintf(buf, sizeof(buf), "%lx-%lx", p->start, p->end);
 		if (!proc_fill_cache(file, ctx,
-				      p->name, p->len,
+				      buf, len,
 				      proc_map_files_instantiate,
 				      task,
 				      (void *)(unsigned long)p->mode))
@@ -3018,11 +3048,11 @@ static const struct inode_operations proc_tgid_base_inode_operations = {
 static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
 {
 	struct dentry *dentry, *leader, *dir;
-	char buf[PROC_NUMBUF];
+	char buf[10 + 1];
 	struct qstr name;
 
 	name.name = buf;
-	name.len = snprintf(buf, sizeof(buf), "%d", pid);
+	name.len = snprintf(buf, sizeof(buf), "%u", pid);
 	/* no ->d_hash() rejects on procfs */
 	dentry = d_hash_and_lookup(mnt->mnt_root, &name);
 	if (dentry) {
@@ -3034,7 +3064,7 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
 		return;
 
 	name.name = buf;
-	name.len = snprintf(buf, sizeof(buf), "%d", tgid);
+	name.len = snprintf(buf, sizeof(buf), "%u", tgid);
 	leader = d_hash_and_lookup(mnt->mnt_root, &name);
 	if (!leader)
 		goto out;
@@ -3046,7 +3076,7 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
 		goto out_put_leader;
 
 	name.name = buf;
-	name.len = snprintf(buf, sizeof(buf), "%d", pid);
+	name.len = snprintf(buf, sizeof(buf), "%u", pid);
 	dentry = d_hash_and_lookup(dir, &name);
 	if (dentry) {
 		d_invalidate(dentry);
@@ -3225,14 +3255,14 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
 	for (iter = next_tgid(ns, iter);
 	     iter.task;
 	     iter.tgid += 1, iter = next_tgid(ns, iter)) {
-		char name[PROC_NUMBUF];
+		char name[10 + 1];
 		int len;
 
 		cond_resched();
 		if (!has_pid_permissions(ns, iter.task, HIDEPID_INVISIBLE))
 			continue;
 
-		len = snprintf(name, sizeof(name), "%d", iter.tgid);
+		len = snprintf(name, sizeof(name), "%u", iter.tgid);
 		ctx->pos = iter.tgid + TGID_OFFSET;
 		if (!proc_fill_cache(file, ctx, name, len,
 				     proc_pid_instantiate, iter.task, NULL)) {
@@ -3560,10 +3590,10 @@ static int proc_task_readdir(struct file *file, struct dir_context *ctx)
 	for (task = first_tid(proc_pid(inode), tid, ctx->pos - 2, ns);
 	     task;
 	     task = next_tid(task), ctx->pos++) {
-		char name[PROC_NUMBUF];
+		char name[10 + 1];
 		int len;
 		tid = task_pid_nr_ns(task, ns);
-		len = snprintf(name, sizeof(name), "%d", tid);
+		len = snprintf(name, sizeof(name), "%u", tid);
 		if (!proc_fill_cache(file, ctx, name, len,
 				     proc_task_instantiate, task, NULL)) {
 			/* returning this tgid failed, save it as the first
@ -55,8 +55,7 @@ static int show_console_dev(struct seq_file *m, void *v)
	if (dev)
		seq_printf(m, " %4d:%d", MAJOR(dev), MINOR(dev));

	seq_printf(m, "\n");

	seq_putc(m, '\n');
	return 0;
}
@ -236,7 +236,7 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
	for (fd = ctx->pos - 2;
	     fd < files_fdtable(files)->max_fds;
	     fd++, ctx->pos++) {
		char name[PROC_NUMBUF];
		char name[10 + 1];
		int len;

		if (!fcheck_files(files, fd))
@ -28,7 +28,7 @@

static DEFINE_RWLOCK(proc_subdir_lock);

static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
static int proc_match(const char *name, struct proc_dir_entry *de, unsigned int len)
{
	if (len < de->namelen)
		return -1;

@ -60,7 +60,7 @@ static struct proc_dir_entry *pde_subdir_find(struct proc_dir_entry *dir,
		struct proc_dir_entry *de = rb_entry(node,
						     struct proc_dir_entry,
						     subdir_node);
		int result = proc_match(len, name, de);
		int result = proc_match(name, de, len);

		if (result < 0)
			node = node->rb_left;

@ -84,7 +84,7 @@ static bool pde_subdir_insert(struct proc_dir_entry *dir,
		struct proc_dir_entry *this = rb_entry(*new,
						       struct proc_dir_entry,
						       subdir_node);
		int result = proc_match(de->namelen, de->name, this);
		int result = proc_match(de->name, this, de->namelen);

		parent = *new;
		if (result < 0)

@ -211,8 +211,8 @@ void proc_free_inum(unsigned int inum)
 * Don't create negative dentries here, return -ENOENT by hand
 * instead.
 */
struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
		struct dentry *dentry)
struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
			      struct proc_dir_entry *de)
{
	struct inode *inode;

@ -235,7 +235,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
			   unsigned int flags)
{
	return proc_lookup_de(PDE(dir), dir, dentry);
	return proc_lookup_de(dir, dentry, PDE(dir));
}

/*

@ -247,8 +247,8 @@ struct dentry *proc_lookup(struct inode *dir, struct dentry *dentry,
 * value of the readdir() call, as long as it's non-negative
 * for success..
 */
int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
		struct dir_context *ctx)
int proc_readdir_de(struct file *file, struct dir_context *ctx,
		    struct proc_dir_entry *de)
{
	int i;

@ -292,7 +292,7 @@ int proc_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);

	return proc_readdir_de(PDE(inode), file, ctx);
	return proc_readdir_de(file, ctx, PDE(inode));
}

/*
@ -5,6 +5,7 @@
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/cache.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>

@ -52,7 +53,7 @@ static void proc_evict_inode(struct inode *inode)
	}
}

static struct kmem_cache * proc_inode_cachep;
static struct kmem_cache *proc_inode_cachep __ro_after_init;

static struct inode *proc_alloc_inode(struct super_block *sb)
{

@ -128,12 +129,12 @@ enum {BIAS = -1U<<31};

static inline int use_pde(struct proc_dir_entry *pde)
{
	return atomic_inc_unless_negative(&pde->in_use);
	return likely(atomic_inc_unless_negative(&pde->in_use));
}

static void unuse_pde(struct proc_dir_entry *pde)
{
	if (atomic_dec_return(&pde->in_use) == BIAS)
	if (unlikely(atomic_dec_return(&pde->in_use) == BIAS))
		complete(pde->pde_unload_completion);
}

@ -166,7 +167,7 @@ static void close_pdeo(struct proc_dir_entry *pde, struct pde_opener *pdeo)
		spin_lock(&pde->pde_unload_lock);
		/* After ->release. */
		list_del(&pdeo->lh);
		if (pdeo->c)
		if (unlikely(pdeo->c))
			complete(pdeo->c);
		kfree(pdeo);
	}

@ -420,7 +421,7 @@ static const char *proc_get_link(struct dentry *dentry,
				 struct delayed_call *done)
{
	struct proc_dir_entry *pde = PDE(inode);
	if (unlikely(!use_pde(pde)))
	if (!use_pde(pde))
		return ERR_PTR(-EINVAL);
	set_delayed_call(done, proc_put_link, pde);
	return pde->data;
@ -31,24 +31,28 @@ struct mempolicy;
 * subdir_node is used to build the rb tree "subdir" of the parent.
 */
struct proc_dir_entry {
	/*
	 * number of callers into module in progress;
	 * negative -> it's going away RSN
	 */
	atomic_t in_use;
	atomic_t count;		/* use count */
	struct list_head pde_openers;	/* who did ->open, but not ->release */
	/* protects ->pde_openers and all struct pde_opener instances */
	spinlock_t pde_unload_lock;
	struct completion *pde_unload_completion;
	const struct inode_operations *proc_iops;
	const struct file_operations *proc_fops;
	void *data;
	unsigned int low_ino;
	umode_t mode;
	nlink_t nlink;
	kuid_t uid;
	kgid_t gid;
	loff_t size;
	const struct inode_operations *proc_iops;
	const struct file_operations *proc_fops;
	struct proc_dir_entry *parent;
	struct rb_root_cached subdir;
	struct rb_node subdir_node;
	void *data;
	atomic_t count;		/* use count */
	atomic_t in_use;	/* number of callers into module in progress; */
				/* negative -> it's going away RSN */
	struct completion *pde_unload_completion;
	struct list_head pde_openers;	/* who did ->open, but not ->release */
	spinlock_t pde_unload_lock;	/* proc_fops checks and pde_users bumps */
	umode_t mode;
	u8 namelen;
	char name[];
} __randomize_layout;

@ -149,10 +153,9 @@ extern bool proc_fill_cache(struct file *, struct dir_context *, const char *, i
 * generic.c
 */
extern struct dentry *proc_lookup(struct inode *, struct dentry *, unsigned int);
extern struct dentry *proc_lookup_de(struct proc_dir_entry *, struct inode *,
				     struct dentry *);
struct dentry *proc_lookup_de(struct inode *, struct dentry *, struct proc_dir_entry *);
extern int proc_readdir(struct file *, struct dir_context *);
extern int proc_readdir_de(struct proc_dir_entry *, struct file *, struct dir_context *);
int proc_readdir_de(struct file *, struct dir_context *, struct proc_dir_entry *);

static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
{
@ -512,23 +512,15 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
				return -EFAULT;
		} else {
			if (kern_addr_valid(start)) {
				unsigned long n;

				/*
				 * Using bounce buffer to bypass the
				 * hardened user copy kernel text checks.
				 */
				memcpy(buf, (char *) start, tsz);
				n = copy_to_user(buffer, buf, tsz);
				/*
				 * We cannot distinguish between fault on source
				 * and fault on destination. When this happens
				 * we clear too and hope it will trigger the
				 * EFAULT again.
				 */
				if (n) {
					if (clear_user(buffer + tsz - n,
								n))
				if (probe_kernel_read(buf, (void *) start, tsz)) {
					if (clear_user(buffer, tsz))
						return -EFAULT;
				} else {
					if (copy_to_user(buffer, buf, tsz))
						return -EFAULT;
				}
			} else {
@ -135,7 +135,7 @@ static struct dentry *proc_tgid_net_lookup(struct inode *dir,
	de = ERR_PTR(-ENOENT);
	net = get_proc_task_net(dir);
	if (net != NULL) {
		de = proc_lookup_de(net->proc_net, dir, dentry);
		de = proc_lookup_de(dir, dentry, net->proc_net);
		put_net(net);
	}
	return de;

@ -172,7 +172,7 @@ static int proc_tgid_net_readdir(struct file *file, struct dir_context *ctx)
	ret = -EINVAL;
	net = get_proc_task_net(file_inode(file));
	if (net != NULL) {
		ret = proc_readdir_de(net->proc_net, file, ctx);
		ret = proc_readdir_de(file, ctx, net->proc_net);
		put_net(net);
	}
	return ret;
@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>

@ -17,11 +18,11 @@ static const char *proc_self_get_link(struct dentry *dentry,

	if (!tgid)
		return ERR_PTR(-ENOENT);
	/* 11 for max length of signed int in decimal + NULL term */
	name = kmalloc(12, dentry ? GFP_KERNEL : GFP_ATOMIC);
	/* max length of unsigned int in decimal + NULL term */
	name = kmalloc(10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!name))
		return dentry ? ERR_PTR(-ENOMEM) : ERR_PTR(-ECHILD);
	sprintf(name, "%d", tgid);
	sprintf(name, "%u", tgid);
	set_delayed_call(done, kfree_link, name);
	return name;
}

@ -30,7 +31,7 @@ static const struct inode_operations proc_self_inode_operations = {
	.get_link = proc_self_get_link,
};

static unsigned self_inum;
static unsigned self_inum __ro_after_init;

int proc_setup_self(struct super_block *s)
{
@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/pid_namespace.h>

@ -18,11 +19,10 @@ static const char *proc_thread_self_get_link(struct dentry *dentry,

	if (!pid)
		return ERR_PTR(-ENOENT);
	name = kmalloc(PROC_NUMBUF + 6 + PROC_NUMBUF,
				dentry ? GFP_KERNEL : GFP_ATOMIC);
	name = kmalloc(10 + 6 + 10 + 1, dentry ? GFP_KERNEL : GFP_ATOMIC);
	if (unlikely(!name))
		return dentry ? ERR_PTR(-ENOMEM) : ERR_PTR(-ECHILD);
	sprintf(name, "%d/task/%d", tgid, pid);
	sprintf(name, "%u/task/%u", tgid, pid);
	set_delayed_call(done, kfree_link, name);
	return name;
}

@ -31,7 +31,7 @@ static const struct inode_operations proc_thread_self_inode_operations = {
	.get_link = proc_thread_self_get_link,
};

static unsigned thread_self_inum;
static unsigned thread_self_inum __ro_after_init;

int proc_setup_thread_self(struct super_block *s)
{
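For illustration, not part of the patch: the magic "10 + 6 + 10 + 1" is exactly the worst case of "%u/task/%u" - ten decimal digits for each u32 (UINT_MAX is 4294967295), six characters for "/task/", and a NUL. A standalone userspace check of that arithmetic:

#include <assert.h>
#include <limits.h>
#include <stdio.h>

int main(void)
{
	char name[10 + 6 + 10 + 1];	/* "%u/task/%u" at worst: UINT_MAX twice */
	int len;

	len = snprintf(name, sizeof(name), "%u/task/%u", UINT_MAX, UINT_MAX);
	/* 4294967295 has 10 digits; "/task/" has 6 chars; total 26 + NUL. */
	assert(len == 26 && (size_t)len < sizeof(name));
	printf("%s (%d chars)\n", name, len);
	return 0;
}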
@ -1178,18 +1178,16 @@ fs_initcall(vmcore_init);
/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	if (proc_vmcore) {
		proc_remove(proc_vmcore);
		proc_vmcore = NULL;
	}

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
	while (!list_empty(&vmcore_list)) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		m = list_first_entry(&vmcore_list, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
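For illustration, not part of the patch: the rewrite trades a cursor-based safe iteration for "detach the head until empty", which needs no saved next pointer. A toy userspace version of the same drain shape, with an invented struct node standing in for the kernel's intrusive list:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Drain a list by repeatedly detaching the head; no cursor can dangle. */
static void drain(struct node **head)
{
	while (*head) {
		struct node *m = *head;	/* first entry */
		*head = m->next;	/* detach */
		printf("freeing %d\n", m->val);
		free(m);
	}
}

int main(void)
{
	struct node *head = NULL;

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));
		n->val = i;
		n->next = head;
		head = n;
	}
	drain(&head);
	return 0;
}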
@ -16,6 +16,22 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
		size, unsigned long offset);
#endif

#ifndef find_next_and_bit
/**
 * find_next_and_bit - find the next set bit in both memory regions
 * @addr1: The first address to base the search on
 * @addr2: The second address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The bitmap size in bits
 *
 * Returns the bit number for the next set bit
 * If no bits are set, returns @size.
 */
extern unsigned long find_next_and_bit(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long size,
		unsigned long offset);
#endif

#ifndef find_next_zero_bit
/**
 * find_next_zero_bit - find the next cleared bit in a memory region

@ -55,8 +71,12 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr,
					 unsigned long size);
#else /* CONFIG_GENERIC_FIND_FIRST_BIT */

#ifndef find_first_bit
#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
#endif
#ifndef find_first_zero_bit
#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
#endif

#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
@ -64,9 +64,14 @@
 * bitmap_find_free_region(bitmap, bits, order)	Find and allocate bit region
 * bitmap_release_region(bitmap, pos, order)	Free specified bit region
 * bitmap_allocate_region(bitmap, pos, order)	Allocate specified bit region
 * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words)
 * bitmap_to_u32array(buf, nwords, src, nbits)	*buf = *dst (nwords 32b words)
 * bitmap_from_arr32(dst, buf, nbits)		Copy nbits from u32[] buf to dst
 * bitmap_to_arr32(buf, src, nbits)		Copy nbits from buf to u32[] dst
 *
 * Note, bitmap_zero() and bitmap_fill() operate over the region of
 * unsigned longs, that is, bits behind bitmap till the unsigned long
 * boundary will be zeroed or filled as well. Consider to use
 * bitmap_clear() or bitmap_set() to make explicit zeroing or filling
 * respectively.
 */

/**

@ -83,8 +88,12 @@
 * test_and_change_bit(bit, addr)	Change bit and return old value
 * find_first_zero_bit(addr, nbits)	Position first zero bit in *addr
 * find_first_bit(addr, nbits)		Position first set bit in *addr
 * find_next_zero_bit(addr, nbits, bit)	Position next zero bit in *addr >= bit
 * find_next_zero_bit(addr, nbits, bit)
 *					Position next zero bit in *addr >= bit
 * find_next_bit(addr, nbits, bit)	Position next set bit in *addr >= bit
 * find_next_and_bit(addr1, addr2, nbits, bit)
 *					Same as find_next_bit, but in
 *					(*addr1 & *addr2)
 *
 */

@ -174,14 +183,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig,
extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
extern unsigned int bitmap_from_u32array(unsigned long *bitmap,
					 unsigned int nbits,
					 const u32 *buf,
					 unsigned int nwords);
extern unsigned int bitmap_to_u32array(u32 *buf,
				       unsigned int nwords,
				       const unsigned long *bitmap,
				       unsigned int nbits);

#ifdef __BIG_ENDIAN
extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else

@ -209,12 +211,12 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)

static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
	unsigned int nlongs = BITS_TO_LONGS(nbits);
	if (!small_const_nbits(nbits)) {
		unsigned int len = (nlongs - 1) * sizeof(unsigned long);
		memset(dst, 0xff, len);
	if (small_const_nbits(nbits))
		*dst = ~0UL;
	else {
		unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);
		memset(dst, 0xff, len);
	}
	dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits);
}

static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,

@ -228,6 +230,35 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
	}
}

/*
 * Copy bitmap and clear tail bits in last word.
 */
static inline void bitmap_copy_clear_tail(unsigned long *dst,
		const unsigned long *src, unsigned int nbits)
{
	bitmap_copy(dst, src, nbits);
	if (nbits % BITS_PER_LONG)
		dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
}

/*
 * On 32-bit systems bitmaps are represented as u32 arrays internally, and
 * therefore conversion is not needed when copying data from/to arrays of u32.
 */
#if BITS_PER_LONG == 64
extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
							unsigned int nbits);
extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
							unsigned int nbits);
#else
#define bitmap_from_arr32(bitmap, buf, nbits)			\
	bitmap_copy_clear_tail((unsigned long *) (bitmap),	\
			(const unsigned long *) (buf), (nbits))
#define bitmap_to_arr32(buf, bitmap, nbits)			\
	bitmap_copy_clear_tail((unsigned long *) (buf),		\
			(const unsigned long *) (bitmap), (nbits))
#endif

static inline int bitmap_and(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, unsigned int nbits)
{
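For illustration, not part of the patch: bitmap_copy_clear_tail() zeroes every bit at or above nbits in the last word, which is what makes the 32-bit no-op casts above safe. A single-word userspace sketch of that tail-clearing, with BITS_PER_LONG and LAST_WORD_MASK defined locally for the example:

#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
/* Mask covering the valid bits of the last word of an nbits-wide bitmap. */
#define LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

int main(void)
{
	unsigned long src = ~0UL;	/* every bit set */
	unsigned long dst = src;	/* bitmap_copy() step */
	unsigned int nbits = 10;

	/* bitmap_copy_clear_tail(): zero bits >= nbits in the last word. */
	if (nbits % BITS_PER_LONG)
		dst &= LAST_WORD_MASK(nbits);
	printf("0x%lx\n", dst);		/* 0x3ff: only the low 10 bits survive */
	return 0;
}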
@ -8,7 +8,6 @@
#define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0)
#define BUILD_BUG_ON_ZERO(e) (0)
#define BUILD_BUG_ON_NULL(e) ((void *)0)
#define BUILD_BUG_ON_INVALID(e) (0)
#define BUILD_BUG_ON_MSG(cond, msg) (0)
#define BUILD_BUG_ON(condition) (0)

@ -28,7 +27,6 @@
 * aren't permitted).
 */
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))
#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:(-!!(e)); }))

/*
 * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
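For illustration, not part of the patch: BUILD_BUG_ON_ZERO() works because -!!(e) is 0 for a false condition (a legal zero-width bitfield, GCC and Clang accept the memberless struct) and -1 for a true one (an illegal negative-width bitfield, so compilation fails). A standalone sketch:

#include <stdio.h>

#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))

int main(void)
{
	int arr[4];

	/* Compiles: condition is false, expression evaluates to 0. */
	size_t ok = BUILD_BUG_ON_ZERO(sizeof(arr) != 4 * sizeof(int));
	printf("%zu\n", ok);

	/* Uncommenting the next line breaks the build: negative bitfield. */
	/* size_t bad = BUILD_BUG_ON_ZERO(sizeof(arr) == 4 * sizeof(int)); */
	return 0;
}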
@ -19,3 +19,11 @@

#define randomized_struct_fields_start	struct {
#define randomized_struct_fields_end	};

/* all clang versions usable with the kernel support KASAN ABI version 5 */
#define KASAN_ABI_VERSION 5

/* emulate gcc's __SANITIZE_ADDRESS__ flag */
#if __has_feature(address_sanitizer)
#define __SANITIZE_ADDRESS__
#endif
@ -640,7 +640,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
/**
 * cpumask_size - size to allocate for a 'struct cpumask' in bytes
 */
static inline size_t cpumask_size(void)
static inline unsigned int cpumask_size(void)
{
	return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long);
}
@ -112,7 +112,7 @@ static inline int cpuset_do_slab_mem_spread(void)
	return task_spread_slab(current);
}

extern int current_cpuset_is_being_rebound(void);
extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

@ -247,9 +247,9 @@ static inline int cpuset_do_slab_mem_spread(void)
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
static inline bool current_cpuset_is_being_rebound(void)
{
	return 0;
	return false;
}

static inline void rebuild_sched_domains(void)
@ -2,13 +2,13 @@
#ifndef LINUX_CRASH_DUMP_H
#define LINUX_CRASH_DUMP_H

#ifdef CONFIG_CRASH_DUMP
#include <linux/kexec.h>
#include <linux/proc_fs.h>
#include <linux/elf.h>

#include <asm/pgtable.h> /* for pgprot_t */

#ifdef CONFIG_CRASH_DUMP
#define ELFCORE_ADDR_MAX	(-1ULL)
#define ELFCORE_ADDR_ERR	(-2ULL)

@ -52,13 +52,13 @@ void vmcore_cleanup(void);
 * has passed the elf core header address on command line.
 *
 * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will
 * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of
 * previous kernel.
 * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic
 * of previous kernel.
 */

static inline int is_kdump_kernel(void)
static inline bool is_kdump_kernel(void)
{
	return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0;
	return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}

/* is_vmcore_usable() checks if the kernel is booting after a panic and

@ -89,7 +89,7 @@ extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn));
extern void unregister_oldmem_pfn_is_ram(void);

#else /* !CONFIG_CRASH_DUMP */
static inline int is_kdump_kernel(void) { return 0; }
static inline bool is_kdump_kernel(void) { return 0; }
#endif /* CONFIG_CRASH_DUMP */

extern unsigned long saved_max_pfn;
@ -2,6 +2,7 @@
#ifndef GENL_MAGIC_FUNC_H
#define GENL_MAGIC_FUNC_H

#include <linux/build_bug.h>
#include <linux/genl_magic_struct.h>

/*

@ -132,17 +133,6 @@ static void dprint_array(const char *dir, int nla_type,
 * use one static buffer for parsing of nested attributes */
static struct nlattr *nested_attr_tb[128];

#ifndef BUILD_BUG_ON
/* Force a compilation error if condition is true */
#define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition))
/* Force a compilation error if condition is true, but also produce a
   result (of value 0 and type size_t), so the expression can be used
   e.g. in a structure initializer (or where-ever else comma expressions
   aren't permitted). */
#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
#endif

#undef GENL_struct
#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
/* *_from_attrs functions are static, but potentially unused */ \
@ -265,7 +265,7 @@ extern struct resource * __devm_request_region(struct device *dev,
extern void __devm_release_region(struct device *dev, struct resource *parent,
				  resource_size_t start, resource_size_t n);
extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
extern int iomem_is_exclusive(u64 addr);
extern bool iomem_is_exclusive(u64 addr);

extern int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
@ -167,7 +167,7 @@ static inline int kallsyms_show_value(void)

static inline void print_ip_sym(unsigned long ip)
{
	printk("[<%p>] %pS\n", (void *) ip, (void *) ip);
	printk("[<%px>] %pS\n", (void *) ip, (void *) ip);
}

#endif /*_LINUX_KALLSYMS_H*/
@ -11,8 +11,6 @@ struct task_struct;

#ifdef CONFIG_KASAN

#define KASAN_SHADOW_SCALE_SHIFT 3

#include <asm/kasan.h>
#include <asm/pgtable.h>

@ -56,14 +54,14 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object);
void kasan_init_slab_obj(struct kmem_cache *cache, const void *object);

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags);
void kasan_kfree_large(const void *ptr);
void kasan_poison_kfree(void *ptr);
void kasan_kfree_large(void *ptr, unsigned long ip);
void kasan_poison_kfree(void *ptr, unsigned long ip);
void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size,
		  gfp_t flags);
void kasan_krealloc(const void *object, size_t new_size, gfp_t flags);

void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags);
bool kasan_slab_free(struct kmem_cache *s, void *object);
bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);

struct kasan_cache {
	int alloc_meta_offset;

@ -108,8 +106,8 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object) {}

static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {}
static inline void kasan_kfree_large(const void *ptr) {}
static inline void kasan_poison_kfree(void *ptr) {}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}
static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {}
static inline void kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags) {}
static inline void kasan_krealloc(const void *object, size_t new_size,

@ -117,7 +115,8 @@ static inline void kasan_krealloc(const void *object, size_t new_size,

static inline void kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags) {}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
{
	return false;
}
@ -44,7 +44,7 @@ extern void lockref_mark_dead(struct lockref *);
extern int lockref_get_not_dead(struct lockref *);

/* Must be called under spinlock for reliable results */
static inline int __lockref_is_dead(const struct lockref *l)
static inline bool __lockref_is_dead(const struct lockref *l)
{
	return ((int)l->count < 0);
}
@ -332,8 +332,8 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit);
void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
int memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_map_memory(phys_addr_t addr);
bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size);
bool memblock_is_reserved(phys_addr_t addr);
bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size);
@ -491,7 +491,7 @@ extern struct mutex module_mutex;
/* FIXME: It'd be nice to isolate modules during init, too, so they
   aren't used before they (may) fail.  But presently too much code
   (IDE & SCSI) require entry into the module during init.*/
static inline int module_is_live(struct module *mod)
static inline bool module_is_live(struct module *mod)
{
	return mod->state != MODULE_STATE_GOING;
}
@ -138,9 +138,9 @@ extern void __mutex_init(struct mutex *lock, const char *name,
 * mutex_is_locked - is the mutex locked
 * @lock: the mutex to be queried
 *
 * Returns 1 if the mutex is locked, 0 if unlocked.
 * Returns true if the mutex is locked, false if unlocked.
 */
static inline int mutex_is_locked(struct mutex *lock)
static inline bool mutex_is_locked(struct mutex *lock)
{
	/*
	 * XXX think about spin_is_locked
@ -167,10 +167,9 @@ void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);

extern unsigned int pipe_max_size, pipe_min_size;
extern unsigned int pipe_max_size;
extern unsigned long pipe_user_pages_hard;
extern unsigned long pipe_user_pages_soft;
int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);

/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe);

@ -191,6 +190,6 @@ long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
struct pipe_inode_info *get_pipe_info(struct file *file);

int create_pipe_files(struct file **, int);
unsigned int round_pipe_size(unsigned int size);
unsigned int round_pipe_size(unsigned long size);

#endif
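For illustration, not part of the patch: widening round_pipe_size()'s parameter to unsigned long lets the function see and refuse requests above UINT_MAX instead of having them silently truncated at the call boundary. A simplified userspace sketch of the post-series behavior - PAGE_SIZE and the shift loop are local stand-ins (the kernel uses roundup_pow_of_two()):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Round a requested pipe size up to a power of two; reject oversized
 * requests rather than truncating them. */
static unsigned int round_pipe_size(unsigned long size)
{
	unsigned int v;

	if (size > (1UL << 31))		/* too large to round up: reject */
		return 0;
	if (size < PAGE_SIZE)		/* POSIX minimum */
		return PAGE_SIZE;
	for (v = PAGE_SIZE; v < size; v <<= 1)	/* next power of two */
		;
	return v;
}

int main(void)
{
	printf("%u\n", round_pipe_size(1));		/* 4096 */
	printf("%u\n", round_pipe_size(65536 + 1));	/* 131072 */
	printf("%u\n", round_pipe_size((1UL << 31) + 1)); /* 0: rejected */
	return 0;
}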
@ -1497,6 +1497,11 @@ static inline struct thread_info *task_thread_info(struct task_struct *task)
extern struct task_struct *find_task_by_vpid(pid_t nr);
extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);

/*
 * find a task by its virtual pid and get the task struct
 */
extern struct task_struct *find_get_task_by_vpid(pid_t nr);

extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
@ -51,9 +51,6 @@ extern int proc_dointvec_minmax(struct ctl_table *, int,
extern int proc_douintvec_minmax(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos);
extern int proc_dopipe_max_size(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos);
extern int proc_dointvec_jiffies(struct ctl_table *, int,
				 void __user *, size_t *, loff_t *);
extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
@ -17,6 +17,7 @@
#define _LINUX_UUID_H_

#include <uapi/linux/uuid.h>
#include <linux/string.h>

#define UUID_SIZE 16
@ -280,8 +280,8 @@ typedef struct siginfo {
#define NSIGTRAP	4

/*
 * There are an additional set of SIGTRAP si_codes used by ptrace
 * that of the form: ((PTRACE_EVENT_XXX << 8) | SIGTRAP)
 * There is an additional set of SIGTRAP si_codes used by ptrace
 * that are of the form: ((PTRACE_EVENT_XXX << 8) | SIGTRAP)
 */

/*
@ -19,7 +19,6 @@
#define _UAPI_LINUX_UUID_H_

#include <linux/types.h>
#include <linux/string.h>

typedef struct {
	__u8 b[16];
@ -596,7 +596,7 @@ static void wq_add(struct mqueue_inode_info *info, int sr,
	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
		if (walk->task->prio <= current->prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
20
ipc/msg.c

@ -476,9 +476,9 @@ static int msgctl_info(struct ipc_namespace *ns, int msqid,
static int msgctl_stat(struct ipc_namespace *ns, int msqid,
			 int cmd, struct msqid64_ds *p)
{
	int err;
	struct msg_queue *msq;
	int success_return;
	int id = 0;
	int err;

	memset(p, 0, sizeof(*p));

@ -489,14 +489,13 @@ static int msgctl_stat(struct ipc_namespace *ns, int msqid,
			err = PTR_ERR(msq);
			goto out_unlock;
		}
		success_return = msq->q_perm.id;
		id = msq->q_perm.id;
	} else {
		msq = msq_obtain_object_check(ns, msqid);
		if (IS_ERR(msq)) {
			err = PTR_ERR(msq);
			goto out_unlock;
		}
		success_return = 0;
	}

	err = -EACCES;

@ -507,6 +506,14 @@ static int msgctl_stat(struct ipc_namespace *ns, int msqid,
	if (err)
		goto out_unlock;

	ipc_lock_object(&msq->q_perm);

	if (!ipc_valid_object(&msq->q_perm)) {
		ipc_unlock_object(&msq->q_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&msq->q_perm, &p->msg_perm);
	p->msg_stime  = msq->q_stime;
	p->msg_rtime  = msq->q_rtime;

@ -516,9 +523,10 @@ static int msgctl_stat(struct ipc_namespace *ns, int msqid,
	p->msg_qbytes = msq->q_qbytes;
	p->msg_lspid  = msq->q_lspid;
	p->msg_lrpid  = msq->q_lrpid;
	rcu_read_unlock();

	return success_return;
	ipc_unlock_object(&msq->q_perm);
	rcu_read_unlock();
	return id;

out_unlock:
	rcu_read_unlock();
10
ipc/sem.c

@ -1213,10 +1213,20 @@ static int semctl_stat(struct ipc_namespace *ns, int semid,
	if (err)
		goto out_unlock;

	ipc_lock_object(&sma->sem_perm);

	if (!ipc_valid_object(&sma->sem_perm)) {
		ipc_unlock_object(&sma->sem_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
	semid64->sem_otime = get_semotime(sma);
	semid64->sem_ctime = sma->sem_ctime;
	semid64->sem_nsems = sma->sem_nsems;

	ipc_unlock_object(&sma->sem_perm);
	rcu_read_unlock();
	return id;
20
ipc/shm.c

@ -909,9 +909,11 @@ static int shmctl_stat(struct ipc_namespace *ns, int shmid,
			int cmd, struct shmid64_ds *tbuf)
{
	struct shmid_kernel *shp;
	int result;
	int id = 0;
	int err;

	memset(tbuf, 0, sizeof(*tbuf));

	rcu_read_lock();
	if (cmd == SHM_STAT) {
		shp = shm_obtain_object(ns, shmid);

@ -919,14 +921,13 @@ static int shmctl_stat(struct ipc_namespace *ns, int shmid,
			err = PTR_ERR(shp);
			goto out_unlock;
		}
		result = shp->shm_perm.id;
		id = shp->shm_perm.id;
	} else {
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
		result = 0;
	}

	err = -EACCES;

@ -937,7 +938,14 @@ static int shmctl_stat(struct ipc_namespace *ns, int shmid,
	if (err)
		goto out_unlock;

	memset(tbuf, 0, sizeof(*tbuf));
	ipc_lock_object(&shp->shm_perm);

	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
	tbuf->shm_segsz	= shp->shm_segsz;
	tbuf->shm_atime	= shp->shm_atim;

@ -946,8 +954,10 @@ static int shmctl_stat(struct ipc_namespace *ns, int shmid,
	tbuf->shm_cpid	= shp->shm_cprid;
	tbuf->shm_lpid	= shp->shm_lprid;
	tbuf->shm_nattch = shp->shm_nattch;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return result;
	return id;

out_unlock:
	rcu_read_unlock();
@ -23,9 +23,12 @@
 *	  tree.
 *	- perform initial checks (capabilities, auditing and permission,
 *	  etc).
 *	- perform read-only operations, such as STAT, INFO commands.
 *	- perform read-only operations, such as INFO command, that
 *	  do not demand atomicity
 *	  acquire the ipc lock (kern_ipc_perm.lock) through
 *	  ipc_lock_object()
 *	- perform read-only operations that demand atomicity,
 *	  such as STAT command.
 *	- perform data updates, such as SET, RMID commands and
 *	  mechanism-specific operations (semop/semtimedop,
 *	  msgsnd/msgrcv, shmat/shmdt).
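For illustration, not part of the patch: the updated comment distinguishes reads that need no lock (INFO) from reads that must see a consistent multi-field snapshot (STAT), which is why the msg/sem/shm STAT paths above now take ipc_lock_object() around the copy-out. A toy userspace model of that distinction, where struct obj, its fields, and the invariant are invented for this sketch and a pthread mutex stands in for kern_ipc_perm.lock (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;	/* stands in for kern_ipc_perm.lock */
	int a, b;		/* must be read as a consistent pair */
};

static void stat_snapshot(struct obj *o, int *a, int *b)
{
	pthread_mutex_lock(&o->lock);	/* ipc_lock_object() */
	*a = o->a;
	*b = o->b;			/* pair is consistent under the lock */
	pthread_mutex_unlock(&o->lock);	/* ipc_unlock_object() */
}

int main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, 1, 2 };
	int a, b;

	stat_snapshot(&o, &a, &b);
	printf("a=%d b=%d\n", a, b);
	return 0;
}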
@ -84,20 +84,24 @@ static atomic_t entry_count;

static async_cookie_t lowest_in_progress(struct async_domain *domain)
{
	struct list_head *pending;
	struct async_entry *first = NULL;
	async_cookie_t ret = ASYNC_COOKIE_MAX;
	unsigned long flags;

	spin_lock_irqsave(&async_lock, flags);

	if (domain)
		pending = &domain->pending;
	else
		pending = &async_global_pending;
	if (domain) {
		if (!list_empty(&domain->pending))
			first = list_first_entry(&domain->pending,
					struct async_entry, domain_list);
	} else {
		if (!list_empty(&async_global_pending))
			first = list_first_entry(&async_global_pending,
					struct async_entry, global_list);
	}

	if (!list_empty(pending))
		ret = list_first_entry(pending, struct async_entry,
				       domain_list)->cookie;
	if (first)
		ret = first->cookie;

	spin_unlock_irqrestore(&async_lock, flags);
	return ret;
@ -1254,9 +1254,9 @@ done:
	return retval;
}

int current_cpuset_is_being_rebound(void)
bool current_cpuset_is_being_rebound(void)
{
	int ret;
	bool ret;

	rcu_read_lock();
	ret = task_cs(current) == cpuset_being_rebound;
@ -355,7 +355,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len,

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());
		unsigned int retlen = min(len, cpumask_size());

		if (compat_put_bitmap(user_mask_ptr, cpumask_bits(mask), retlen * 8))
			ret = -EFAULT;
@ -10,3 +10,7 @@ CONFIG_OPTIMIZE_INLINING=y
# CONFIG_SLAB is not set
# CONFIG_SLUB is not set
CONFIG_SLOB=y
CONFIG_CC_STACKPROTECTOR_NONE=y
# CONFIG_CC_STACKPROTECTOR_REGULAR is not set
# CONFIG_CC_STACKPROTECTOR_STRONG is not set
# CONFIG_CC_STACKPROTECTOR_AUTO is not set
@ -1592,6 +1592,10 @@ static __latent_entropy struct task_struct *copy_process(
	int retval;
	struct task_struct *p;

	/*
	 * Don't allow sharing the root directory with processes in a different
	 * namespace
	 */
	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

@ -2067,6 +2071,8 @@ long _do_fork(unsigned long clone_flags,
	      int __user *child_tidptr,
	      unsigned long tls)
{
	struct completion vfork;
	struct pid *pid;
	struct task_struct *p;
	int trace = 0;
	long nr;

@ -2092,43 +2098,40 @@ long _do_fork(unsigned long clone_flags,
	p = copy_process(clone_flags, stack_start, stack_size,
			 child_tidptr, NULL, trace, tls, NUMA_NO_NODE);
	add_latent_entropy();

	if (IS_ERR(p))
		return PTR_ERR(p);

	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;
		struct pid *pid;
	trace_sched_process_fork(current, p);

		trace_sched_process_fork(current, p);
	pid = get_task_pid(p, PIDTYPE_PID);
	nr = pid_vnr(pid);

		pid = get_task_pid(p, PIDTYPE_PID);
		nr = pid_vnr(pid);
	if (clone_flags & CLONE_PARENT_SETTID)
		put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

	if (clone_flags & CLONE_VFORK) {
		p->vfork_done = &vfork;
		init_completion(&vfork);
		get_task_struct(p);
	}

	wake_up_new_task(p);

	/* forking complete and child started to run, tell ptracer */
	if (unlikely(trace))
		ptrace_event_pid(trace, pid);

	if (clone_flags & CLONE_VFORK) {
		if (!wait_for_vfork_done(p, &vfork))
			ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
	}

	put_pid(pid);
	} else {
		nr = PTR_ERR(p);
		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
			get_task_struct(p);
		}

		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event_pid(trace, pid);

		if (clone_flags & CLONE_VFORK) {
			if (!wait_for_vfork_done(p, &vfork))
				ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
		}

		put_pid(pid);
	}
	return nr;
}
@ -862,24 +862,6 @@ static void put_pi_state(struct futex_pi_state *pi_state)
	}
}

/*
 * Look up the task based on what TID userspace gave us.
 * We dont trust it.
 */
static struct task_struct *futex_find_get_task(pid_t pid)
{
	struct task_struct *p;

	rcu_read_lock();
	p = find_task_by_vpid(pid);
	if (p)
		get_task_struct(p);

	rcu_read_unlock();

	return p;
}

#ifdef CONFIG_FUTEX_PI

/*

@ -1183,7 +1165,7 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key,
	 */
	if (!pid)
		return -ESRCH;
	p = futex_find_get_task(pid);
	p = find_get_task_by_vpid(pid);
	if (!p)
		return -ESRCH;
@ -10,7 +10,6 @@
#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
@ -358,7 +358,8 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		if (kcov->t != NULL)
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		if (arg == KCOV_TRACE_PC)
			kcov->mode = KCOV_MODE_TRACE_PC;

@ -370,7 +371,6 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
#endif
		else
			return -EINVAL;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
13
kernel/pid.c

@ -343,6 +343,19 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
	return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}

struct task_struct *find_get_task_by_vpid(pid_t nr)
{
	struct task_struct *task;

	rcu_read_lock();
	task = find_task_by_vpid(nr);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	return task;
}

struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
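For illustration, not part of the patch: the new helper centralizes a pattern that ptrace, futex, and taskstats each open-coded - look the task up under rcu_read_lock() and pin it with a reference before the lock is dropped. A toy userspace model of that lookup-then-pin shape, where struct task, the table, and find_get_task() are invented for this sketch and a mutex stands in for RCU (build with -lpthread):

#include <pthread.h>
#include <stdio.h>

struct task {
	int pid;
	int refcount;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct task table[] = { { 1, 1 }, { 42, 1 } };

static struct task *find_get_task(int pid)
{
	struct task *t = NULL;

	pthread_mutex_lock(&table_lock);
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (table[i].pid == pid)
			t = &table[i];
	if (t)
		t->refcount++;	/* pin before unlocking */
	pthread_mutex_unlock(&table_lock);
	return t;
}

int main(void)
{
	struct task *t = find_get_task(42);

	printf("%s, refcount=%d\n", t ? "found" : "missing", t ? t->refcount : 0);
	return 0;
}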
@ -1103,21 +1103,6 @@ int ptrace_request(struct task_struct *child, long request,
	return ret;
}

static struct task_struct *ptrace_get_task_struct(pid_t pid)
{
	struct task_struct *child;

	rcu_read_lock();
	child = find_task_by_vpid(pid);
	if (child)
		get_task_struct(child);
	rcu_read_unlock();

	if (!child)
		return ERR_PTR(-ESRCH);
	return child;
}

#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif

@ -1135,9 +1120,9 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}

@ -1281,9 +1266,9 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_long_t, request, compat_long_t, pid,
		goto out;
	}

	child = ptrace_get_task_struct(pid);
	if (IS_ERR(child)) {
		ret = PTR_ERR(child);
	child = find_get_task_by_vpid(pid);
	if (!child) {
		ret = -ESRCH;
		goto out;
	}
@ -611,7 +611,6 @@ free_bufs:

	kref_put(&chan->kref, relay_destroy_channel);
	mutex_unlock(&relay_channels_mutex);
	kfree(chan);
	return NULL;
}
EXPORT_SYMBOL_GPL(relay_open);
@ -1576,17 +1576,17 @@ static int strict_iomem_checks;

/*
 * check if an address is reserved in the iomem resource tree
 * returns 1 if reserved, 0 if not reserved.
 * returns true if reserved, false if not reserved.
 */
int iomem_is_exclusive(u64 addr)
bool iomem_is_exclusive(u64 addr)
{
	struct resource *p = &iomem_resource;
	int err = 0;
	bool err = false;
	loff_t l;
	int size = PAGE_SIZE;

	if (!strict_iomem_checks)
		return 0;
		return false;

	addr = addr & PAGE_MASK;

@ -1609,7 +1609,7 @@ int iomem_is_exclusive(u64 addr)
			continue;
		if (IS_ENABLED(CONFIG_IO_STRICT_DEVMEM)
				|| p->flags & IORESOURCE_EXCLUSIVE) {
			err = 1;
			err = true;
			break;
		}
	}
@ -4867,7 +4867,7 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,

	ret = sched_getaffinity(pid, mask);
	if (ret == 0) {
		size_t retlen = min_t(size_t, len, cpumask_size());
		unsigned int retlen = min(len, cpumask_size());

		if (copy_to_user(user_mask_ptr, mask, retlen))
			ret = -EFAULT;
@ -218,6 +218,8 @@ static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
static int proc_dostring_coredump(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);
#endif
static int proc_dopipe_max_size(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp, loff_t *ppos);

#ifdef CONFIG_MAGIC_SYSRQ
/* Note: sysrq code uses it's own private copy */

@ -1812,8 +1814,7 @@ static struct ctl_table fs_table[] = {
		.data		= &pipe_max_size,
		.maxlen		= sizeof(pipe_max_size),
		.mode		= 0644,
		.proc_handler	= &pipe_proc_fn,
		.extra1		= &pipe_min_size,
		.proc_handler	= proc_dopipe_max_size,
	},
	{
		.procname	= "pipe-user-pages-hard",

@ -2615,29 +2616,17 @@ int proc_douintvec_minmax(struct ctl_table *table, int write,
				 do_proc_douintvec_minmax_conv, &param);
}

struct do_proc_dopipe_max_size_conv_param {
	unsigned int *min;
};

static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
					unsigned int *valp,
					int write, void *data)
{
	struct do_proc_dopipe_max_size_conv_param *param = data;

	if (write) {
		unsigned int val;

		if (*lvalp > UINT_MAX)
			return -EINVAL;

		val = round_pipe_size(*lvalp);
		if (val == 0)
			return -EINVAL;

		if (param->min && *param->min > val)
			return -ERANGE;

		*valp = val;
	} else {
		unsigned int val = *valp;

@ -2647,14 +2636,11 @@ static int do_proc_dopipe_max_size_conv(unsigned long *lvalp,
	return 0;
}

int proc_dopipe_max_size(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
static int proc_dopipe_max_size(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct do_proc_dopipe_max_size_conv_param param = {
		.min = (unsigned int *) table->extra1,
	};
	return do_proc_douintvec(table, write, buffer, lenp, ppos,
				 do_proc_dopipe_max_size_conv, &param);
				 do_proc_dopipe_max_size_conv, NULL);
}

static void validate_coredump_safety(void)

@ -3160,12 +3146,6 @@ int proc_douintvec_minmax(struct ctl_table *table, int write,
	return -ENOSYS;
}

int proc_dopipe_max_size(struct ctl_table *table, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	return -ENOSYS;
}

int proc_dointvec_jiffies(struct ctl_table *table, int write,
			  void __user *buffer, size_t *lenp, loff_t *ppos)
{

@ -3209,7 +3189,6 @@ EXPORT_SYMBOL(proc_douintvec);
EXPORT_SYMBOL(proc_dointvec_jiffies);
EXPORT_SYMBOL(proc_dointvec_minmax);
EXPORT_SYMBOL_GPL(proc_douintvec_minmax);
EXPORT_SYMBOL_GPL(proc_dopipe_max_size);
EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
EXPORT_SYMBOL(proc_dostring);
@ -194,11 +194,7 @@ static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	tsk = find_get_task_by_vpid(pid);
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
@ -37,7 +37,6 @@
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
@ -217,7 +217,7 @@ config ENABLE_MUST_CHECK
config FRAME_WARN
	int "Warn for stack frames larger than (needs gcc 4.4)"
	range 0 8192
	default 0 if KASAN
	default 3072 if KASAN_EXTRA
	default 2048 if GCC_PLUGIN_LATENT_ENTROPY
	default 1280 if (!64BIT && PARISC)
	default 1024 if (!64BIT && !PARISC)

@ -1641,7 +1641,10 @@ config DMA_API_DEBUG

	  If unsure, say N.

menu "Runtime Testing"
menuconfig RUNTIME_TESTING_MENU
	bool "Runtime Testing"

if RUNTIME_TESTING_MENU

config LKDTM
	tristate "Linux Kernel Dump Test Tool Module"

@ -1841,7 +1844,7 @@ config TEST_BPF

	  If unsure, say N.

config TEST_FIND_BIT
config FIND_BIT_BENCHMARK
	tristate "Test find_bit functions"
	default n
	help

@ -1929,7 +1932,7 @@ config TEST_DEBUG_VIRTUAL

	  If unsure, say N.

endmenu # runtime tests
endif # RUNTIME_TESTING_MENU

config MEMTEST
	bool "Memtest"
@ -20,6 +20,17 @@ config KASAN
	  Currently CONFIG_KASAN doesn't work with CONFIG_DEBUG_SLAB
	  (the resulting kernel does not boot).

config KASAN_EXTRA
	bool "KAsan: extra checks"
	depends on KASAN && DEBUG_KERNEL && !COMPILE_TEST
	help
	  This enables further checks in the kernel address sanitizer, for now
	  it only includes the address-use-after-scope check that can lead
	  to excessive kernel stack usage, frame size warnings and longer
	  compile time.
	  https://gcc.gnu.org/bugzilla/show_bug.cgi?id=81715 has more

choice
	prompt "Instrumentation type"
	depends on KASAN
@ -46,8 +46,8 @@ obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
obj-y += hexdump.o
obj-$(CONFIG_TEST_HEXDUMP) += test_hexdump.o
obj-y += kstrtox.o
obj-$(CONFIG_FIND_BIT_BENCHMARK) += find_bit_benchmark.o
obj-$(CONFIG_TEST_BPF) += test_bpf.o
obj-$(CONFIG_TEST_FIND_BIT) += test_find_bit.o
obj-$(CONFIG_TEST_FIRMWARE) += test_firmware.o
obj-$(CONFIG_TEST_SYSCTL) += test_sysctl.o
obj-$(CONFIG_TEST_HASH) += test_hash.o test_siphash.o
143
lib/bitmap.c
143
lib/bitmap.c
@ -1105,93 +1105,6 @@ int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
}
EXPORT_SYMBOL(bitmap_allocate_region);

/**
 * bitmap_from_u32array - copy the contents of a u32 array of bits to bitmap
 * @bitmap: array of unsigned longs, the destination bitmap, non NULL
 * @nbits: number of bits in @bitmap
 * @buf: array of u32 (in host byte order), the source bitmap, non NULL
 * @nwords: number of u32 words in @buf
 *
 * copy min(nbits, 32*nwords) bits from @buf to @bitmap, remaining
 * bits between nwords and nbits in @bitmap (if any) are cleared. In
 * last word of @bitmap, the bits beyond nbits (if any) are kept
 * unchanged.
 *
 * Return the number of bits effectively copied.
 */
unsigned int
bitmap_from_u32array(unsigned long *bitmap, unsigned int nbits,
		     const u32 *buf, unsigned int nwords)
{
	unsigned int dst_idx, src_idx;

	for (src_idx = dst_idx = 0; dst_idx < BITS_TO_LONGS(nbits); ++dst_idx) {
		unsigned long part = 0;

		if (src_idx < nwords)
			part = buf[src_idx++];

#if BITS_PER_LONG == 64
		if (src_idx < nwords)
			part |= ((unsigned long) buf[src_idx++]) << 32;
#endif

		if (dst_idx < nbits/BITS_PER_LONG)
			bitmap[dst_idx] = part;
		else {
			unsigned long mask = BITMAP_LAST_WORD_MASK(nbits);

			bitmap[dst_idx] = (bitmap[dst_idx] & ~mask)
				| (part & mask);
		}
	}

	return min_t(unsigned int, nbits, 32*nwords);
}
EXPORT_SYMBOL(bitmap_from_u32array);

/**
 * bitmap_to_u32array - copy the contents of bitmap to a u32 array of bits
 * @buf: array of u32 (in host byte order), the dest bitmap, non NULL
 * @nwords: number of u32 words in @buf
 * @bitmap: array of unsigned longs, the source bitmap, non NULL
 * @nbits: number of bits in @bitmap
 *
 * copy min(nbits, 32*nwords) bits from @bitmap to @buf. Remaining
 * bits after nbits in @buf (if any) are cleared.
 *
 * Return the number of bits effectively copied.
 */
unsigned int
bitmap_to_u32array(u32 *buf, unsigned int nwords,
		   const unsigned long *bitmap, unsigned int nbits)
{
	unsigned int dst_idx = 0, src_idx = 0;

	while (dst_idx < nwords) {
		unsigned long part = 0;

		if (src_idx < BITS_TO_LONGS(nbits)) {
			part = bitmap[src_idx];
			if (src_idx >= nbits/BITS_PER_LONG)
				part &= BITMAP_LAST_WORD_MASK(nbits);
			src_idx++;
		}

		buf[dst_idx++] = part & 0xffffffffUL;

#if BITS_PER_LONG == 64
		if (dst_idx < nwords) {
			part >>= 32;
			buf[dst_idx++] = part & 0xffffffffUL;
		}
#endif
	}

	return min_t(unsigned int, nbits, 32*nwords);
}
EXPORT_SYMBOL(bitmap_to_u32array);

/**
 * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order.
 * @dst: destination buffer
@ -1214,3 +1127,59 @@ void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int n
}
EXPORT_SYMBOL(bitmap_copy_le);
#endif

#if BITS_PER_LONG == 64
/**
 * bitmap_from_arr32 - copy the contents of u32 array of bits to bitmap
 * @bitmap: array of unsigned longs, the destination bitmap
 * @buf: array of u32 (in host byte order), the source bitmap
 * @nbits: number of bits in @bitmap
 */
void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
		       unsigned int nbits)
{
	unsigned int i, halfwords;

	if (!nbits)
		return;

	halfwords = DIV_ROUND_UP(nbits, 32);
	for (i = 0; i < halfwords; i++) {
		bitmap[i/2] = (unsigned long) buf[i];
		if (++i < halfwords)
			bitmap[i/2] |= ((unsigned long) buf[i]) << 32;
	}

	/* Clear tail bits in last word beyond nbits. */
	if (nbits % BITS_PER_LONG)
		bitmap[(halfwords - 1) / 2] &= BITMAP_LAST_WORD_MASK(nbits);
}
EXPORT_SYMBOL(bitmap_from_arr32);

/**
 * bitmap_to_arr32 - copy the contents of bitmap to a u32 array of bits
 * @buf: array of u32 (in host byte order), the dest bitmap
 * @bitmap: array of unsigned longs, the source bitmap
 * @nbits: number of bits in @bitmap
 */
void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, unsigned int nbits)
{
	unsigned int i, halfwords;

	if (!nbits)
		return;

	halfwords = DIV_ROUND_UP(nbits, 32);
	for (i = 0; i < halfwords; i++) {
		buf[i] = (u32) (bitmap[i/2] & UINT_MAX);
		if (++i < halfwords)
			buf[i] = (u32) (bitmap[i/2] >> 32);
	}

	/* Clear tail bits in last element of array beyond nbits. */
	if (nbits % BITS_PER_LONG)
		buf[halfwords - 1] &= (u32) (UINT_MAX >> ((-nbits) & 31));
}
EXPORT_SYMBOL(bitmap_to_arr32);

#endif
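
A rough usage sketch (not taken from the diff) of the new helpers: the two directions round-trip, and, unlike the removed u32array API, the tail past nbits is always cleared rather than conditionally preserved.

	DECLARE_BITMAP(mask, 100);
	u32 words[DIV_ROUND_UP(100, 32)];

	bitmap_zero(mask, 100);
	bitmap_set(mask, 0, 37);

	bitmap_to_arr32(words, mask, 100);	/* bitmap -> u32[]; bits 100..127 of words are cleared */
	bitmap_from_arr32(mask, words, 100);	/* u32[] -> bitmap; tail of the last word is cleared */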

lib/cpumask.c
@ -33,10 +33,11 @@ EXPORT_SYMBOL(cpumask_next);
int cpumask_next_and(int n, const struct cpumask *src1p,
		     const struct cpumask *src2p)
{
	while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
		if (cpumask_test_cpu(n, src2p))
			break;
	return n;
	/* -1 is a legal arg here. */
	if (n != -1)
		cpumask_check(n);
	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
		nr_cpumask_bits, n + 1);
}
EXPORT_SYMBOL(cpumask_next_and);

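As a sketch of the caller side (hypothetical; do_something() is a placeholder), the function still enumerates the intersection of two masks, it just does so with one find_next_and_bit() pass instead of probing every bit of src1p through src2p:

	int cpu = -1;

	while ((cpu = cpumask_next_and(cpu, src1p, src2p)) < nr_cpu_ids)
		do_something(cpu);	/* placeholder for per-CPU work */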

lib/find_bit.c
@ -21,22 +21,29 @@
#include <linux/export.h>
#include <linux/kernel.h>

#if !defined(find_next_bit) || !defined(find_next_zero_bit)
#if !defined(find_next_bit) || !defined(find_next_zero_bit) || \
		!defined(find_next_and_bit)

/*
 * This is a common helper function for find_next_bit and
 * find_next_zero_bit. The difference is the "invert" argument, which
 * is XORed with each fetched word before searching it for one bits.
 * This is a common helper function for find_next_bit, find_next_zero_bit, and
 * find_next_and_bit. The differences are:
 *  - The "invert" argument, which is XORed with each fetched word before
 *    searching it for one bits.
 *  - The optional "addr2", which is anded with "addr1" if present.
 */
static unsigned long _find_next_bit(const unsigned long *addr,
		unsigned long nbits, unsigned long start, unsigned long invert)
static inline unsigned long _find_next_bit(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long nbits,
		unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (unlikely(start >= nbits))
		return nbits;

	tmp = addr[start / BITS_PER_LONG] ^ invert;
	tmp = addr1[start / BITS_PER_LONG];
	if (addr2)
		tmp &= addr2[start / BITS_PER_LONG];
	tmp ^= invert;

	/* Handle 1st word. */
	tmp &= BITMAP_FIRST_WORD_MASK(start);
@ -47,7 +54,10 @@ static unsigned long _find_next_bit(const unsigned long *addr,
		if (start >= nbits)
			return nbits;

		tmp = addr[start / BITS_PER_LONG] ^ invert;
		tmp = addr1[start / BITS_PER_LONG];
		if (addr2)
			tmp &= addr2[start / BITS_PER_LONG];
		tmp ^= invert;
	}

	return min(start + __ffs(tmp), nbits);
@ -61,7 +71,7 @@ static unsigned long _find_next_bit(const unsigned long *addr,
unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
			    unsigned long offset)
{
	return _find_next_bit(addr, size, offset, 0UL);
	return _find_next_bit(addr, NULL, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit);
#endif
@ -70,11 +80,21 @@ EXPORT_SYMBOL(find_next_bit);
unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size,
				 unsigned long offset)
{
	return _find_next_bit(addr, size, offset, ~0UL);
	return _find_next_bit(addr, NULL, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit);
#endif

#if !defined(find_next_and_bit)
unsigned long find_next_and_bit(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long size,
		unsigned long offset)
{
	return _find_next_bit(addr1, addr2, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_and_bit);
#endif

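To summarize the dispatch the hunks above set up: one helper now serves all three searches, with "invert" selecting set versus zero bits and "addr2" restricting the walk to an intersection. A toy single-word version of the same trick (illustrative only, using a GCC builtin instead of the kernel helpers):

	static unsigned long first_match(unsigned long w1, const unsigned long *w2,
					 unsigned long invert)
	{
		unsigned long tmp = w1;

		if (w2)			/* optional second operand: AND-search */
			tmp &= *w2;
		tmp ^= invert;		/* invert == ~0UL turns this into a zero-bit search */

		return tmp ? __builtin_ctzl(tmp) : 8 * sizeof(tmp);
	}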

#ifndef find_first_bit
/*
 * Find the first set bit in a memory region.
@ -146,15 +166,19 @@ static inline unsigned long ext2_swab(const unsigned long y)
}

#if !defined(find_next_bit_le) || !defined(find_next_zero_bit_le)
static unsigned long _find_next_bit_le(const unsigned long *addr,
		unsigned long nbits, unsigned long start, unsigned long invert)
static inline unsigned long _find_next_bit_le(const unsigned long *addr1,
		const unsigned long *addr2, unsigned long nbits,
		unsigned long start, unsigned long invert)
{
	unsigned long tmp;

	if (unlikely(start >= nbits))
		return nbits;

	tmp = addr[start / BITS_PER_LONG] ^ invert;
	tmp = addr1[start / BITS_PER_LONG];
	if (addr2)
		tmp &= addr2[start / BITS_PER_LONG];
	tmp ^= invert;

	/* Handle 1st word. */
	tmp &= ext2_swab(BITMAP_FIRST_WORD_MASK(start));
@ -165,7 +189,10 @@ static unsigned long _find_next_bit_le(const unsigned long *addr,
		if (start >= nbits)
			return nbits;

		tmp = addr[start / BITS_PER_LONG] ^ invert;
		tmp = addr1[start / BITS_PER_LONG];
		if (addr2)
			tmp &= addr2[start / BITS_PER_LONG];
		tmp ^= invert;
	}

	return min(start + __ffs(ext2_swab(tmp)), nbits);
@ -176,7 +203,7 @@ static unsigned long _find_next_bit_le(const unsigned long *addr,
unsigned long find_next_zero_bit_le(const void *addr, unsigned
		long size, unsigned long offset)
{
	return _find_next_bit_le(addr, size, offset, ~0UL);
	return _find_next_bit_le(addr, NULL, size, offset, ~0UL);
}
EXPORT_SYMBOL(find_next_zero_bit_le);
#endif
@ -185,7 +212,7 @@ EXPORT_SYMBOL(find_next_zero_bit_le);
unsigned long find_next_bit_le(const void *addr, unsigned
		long size, unsigned long offset)
{
	return _find_next_bit_le(addr, size, offset, 0UL);
	return _find_next_bit_le(addr, NULL, size, offset, 0UL);
}
EXPORT_SYMBOL(find_next_bit_le);
#endif

lib/find_bit_benchmark.c
@ -35,6 +35,7 @@
#define SPARSE 500

static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata;
static DECLARE_BITMAP(bitmap2, BITMAP_LEN) __initdata;

/*
 * This is Schlemiel the Painter's algorithm. It should be called after
@ -43,16 +44,15 @@ static DECLARE_BITMAP(bitmap, BITMAP_LEN) __initdata;
static int __init test_find_first_bit(void *bitmap, unsigned long len)
{
	unsigned long i, cnt;
	cycles_t cycles;
	ktime_t time;

	cycles = get_cycles();
	time = ktime_get();
	for (cnt = i = 0; i < len; cnt++) {
		i = find_first_bit(bitmap, len);
		__clear_bit(i, bitmap);
	}
	cycles = get_cycles() - cycles;
	pr_err("find_first_bit:\t\t%llu cycles,\t%ld iterations\n",
	       (u64)cycles, cnt);
	time = ktime_get() - time;
	pr_err("find_first_bit: %18llu ns, %6ld iterations\n", time, cnt);

	return 0;
}
@ -60,14 +60,13 @@ static int __init test_find_first_bit(void *bitmap, unsigned long len)
static int __init test_find_next_bit(const void *bitmap, unsigned long len)
{
	unsigned long i, cnt;
	cycles_t cycles;
	ktime_t time;

	cycles = get_cycles();
	time = ktime_get();
	for (cnt = i = 0; i < BITMAP_LEN; cnt++)
		i = find_next_bit(bitmap, BITMAP_LEN, i) + 1;
	cycles = get_cycles() - cycles;
	pr_err("find_next_bit:\t\t%llu cycles,\t%ld iterations\n",
	       (u64)cycles, cnt);
	time = ktime_get() - time;
	pr_err("find_next_bit: %18llu ns, %6ld iterations\n", time, cnt);

	return 0;
}
@ -75,14 +74,13 @@ static int __init test_find_next_bit(const void *bitmap, unsigned long len)
static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len)
{
	unsigned long i, cnt;
	cycles_t cycles;
	ktime_t time;

	cycles = get_cycles();
	time = ktime_get();
	for (cnt = i = 0; i < BITMAP_LEN; cnt++)
		i = find_next_zero_bit(bitmap, len, i) + 1;
	cycles = get_cycles() - cycles;
	pr_err("find_next_zero_bit:\t%llu cycles,\t%ld iterations\n",
	       (u64)cycles, cnt);
	time = ktime_get() - time;
	pr_err("find_next_zero_bit: %18llu ns, %6ld iterations\n", time, cnt);

	return 0;
}
@ -90,9 +88,9 @@ static int __init test_find_next_zero_bit(const void *bitmap, unsigned long len)
static int __init test_find_last_bit(const void *bitmap, unsigned long len)
{
	unsigned long l, cnt = 0;
	cycles_t cycles;
	ktime_t time;

	cycles = get_cycles();
	time = ktime_get();
	do {
		cnt++;
		l = find_last_bit(bitmap, len);
@ -100,9 +98,24 @@ static int __init test_find_last_bit(const void *bitmap, unsigned long len)
			break;
		len = l;
	} while (len);
	time = ktime_get() - time;
	pr_err("find_last_bit: %18llu ns, %6ld iterations\n", time, cnt);

	return 0;
}

static int __init test_find_next_and_bit(const void *bitmap,
		const void *bitmap2, unsigned long len)
{
	unsigned long i, cnt;
	cycles_t cycles;

	cycles = get_cycles();
	for (cnt = i = 0; i < BITMAP_LEN; cnt++)
		i = find_next_and_bit(bitmap, bitmap2, BITMAP_LEN, i+1);
	cycles = get_cycles() - cycles;
	pr_err("find_last_bit:\t\t%llu cycles,\t%ld iterations\n",
	       (u64)cycles, cnt);
	pr_err("find_next_and_bit:\t\t%llu cycles, %ld iterations\n",
	       (u64)cycles, cnt);

	return 0;
}
@ -114,31 +127,36 @@ static int __init find_bit_test(void)
	pr_err("\nStart testing find_bit() with random-filled bitmap\n");

	get_random_bytes(bitmap, sizeof(bitmap));
	get_random_bytes(bitmap2, sizeof(bitmap2));

	test_find_next_bit(bitmap, BITMAP_LEN);
	test_find_next_zero_bit(bitmap, BITMAP_LEN);
	test_find_last_bit(bitmap, BITMAP_LEN);
	test_find_first_bit(bitmap, BITMAP_LEN);
	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);

	pr_err("\nStart testing find_bit() with sparse bitmap\n");

	bitmap_zero(bitmap, BITMAP_LEN);
	bitmap_zero(bitmap2, BITMAP_LEN);

	while (nbits--)
	while (nbits--) {
		__set_bit(prandom_u32() % BITMAP_LEN, bitmap);
		__set_bit(prandom_u32() % BITMAP_LEN, bitmap2);
	}

	test_find_next_bit(bitmap, BITMAP_LEN);
	test_find_next_zero_bit(bitmap, BITMAP_LEN);
	test_find_last_bit(bitmap, BITMAP_LEN);
	test_find_first_bit(bitmap, BITMAP_LEN);
	test_find_next_and_bit(bitmap, bitmap2, BITMAP_LEN);

	return 0;
	/*
	 * Everything is OK. Return error just to let user run benchmark
	 * again without annoying rmmod.
	 */
	return -EINVAL;
}
module_init(find_bit_test);

static void __exit test_find_bit_cleanup(void)
{
}
module_exit(test_find_bit_cleanup);

MODULE_LICENSE("GPL");
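
The benchmark's timing change follows one simple pattern; here is a minimal sketch (workload() is a placeholder) of the ktime-based form it moved to, which reports nanoseconds portably where get_cycles() is not meaningful on every architecture:

	ktime_t time;

	time = ktime_get();
	workload();				/* placeholder for the code under test */
	time = ktime_get() - time;
	pr_err("workload: %18llu ns\n", time);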

lib/stackdepot.c
@ -163,6 +163,21 @@ static inline u32 hash_stack(unsigned long *entries, unsigned int size)
			       STACK_HASH_SEED);
}

/* Use our own, non-instrumented version of memcmp().
 *
 * We actually don't care about the order, just the equality.
 */
static inline
int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
			unsigned int n)
{
	for ( ; n-- ; u1++, u2++) {
		if (*u1 != *u2)
			return 1;
	}
	return 0;
}

/* Find a stack that is equal to the one stored in entries in the hash */
static inline struct stack_record *find_stack(struct stack_record *bucket,
					     unsigned long *entries, int size,
@ -173,10 +188,8 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
	for (found = bucket; found; found = found->next) {
		if (found->hash == hash &&
		    found->size == size &&
		    !memcmp(entries, found->entries,
			    size * sizeof(unsigned long))) {
		    !stackdepot_memcmp(entries, found->entries, size))
			return found;
		}
	}
	return NULL;
}

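For context, a rough sketch of how the depot is driven (assuming the 4.16-era stackdepot API); find_stack() above is what deduplicates the saved trace, and stackdepot_memcmp() keeps that hot lookup path free of the instrumented generic memcmp():

	unsigned long entries[16];
	struct stack_trace trace = {
		.entries	= entries,
		.max_entries	= ARRAY_SIZE(entries),
		.skip		= 2,
	};
	depot_stack_handle_t handle;

	save_stack_trace(&trace);
	handle = depot_save_stack(&trace, GFP_NOWAIT);	/* returns a compact handle */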

lib/test_bitmap.c
@ -23,7 +23,7 @@ __check_eq_uint(const char *srcfile, unsigned int line,
		const unsigned int exp_uint, unsigned int x)
{
	if (exp_uint != x) {
		pr_warn("[%s:%u] expected %u, got %u\n",
		pr_err("[%s:%u] expected %u, got %u\n",
			srcfile, line, exp_uint, x);
		return false;
	}
@ -33,19 +33,13 @@ __check_eq_uint(const char *srcfile, unsigned int line,

static bool __init
__check_eq_bitmap(const char *srcfile, unsigned int line,
		  const unsigned long *exp_bmap, unsigned int exp_nbits,
		  const unsigned long *bmap, unsigned int nbits)
		  const unsigned long *exp_bmap, const unsigned long *bmap,
		  unsigned int nbits)
{
	if (exp_nbits != nbits) {
		pr_warn("[%s:%u] bitmap length mismatch: expected %u, got %u\n",
			srcfile, line, exp_nbits, nbits);
		return false;
	}

	if (!bitmap_equal(exp_bmap, bmap, nbits)) {
		pr_warn("[%s:%u] bitmaps contents differ: expected \"%*pbl\", got \"%*pbl\"\n",
			srcfile, line,
			exp_nbits, exp_bmap, nbits, bmap);
			nbits, exp_bmap, nbits, bmap);
		return false;
	}
	return true;
@ -66,6 +60,10 @@ __check_eq_pbl(const char *srcfile, unsigned int line,
	return true;
}

static bool __init
__check_eq_u32_array(const char *srcfile, unsigned int line,
		     const u32 *exp_arr, unsigned int exp_len,
		     const u32 *arr, unsigned int len) __used;
static bool __init
__check_eq_u32_array(const char *srcfile, unsigned int line,
		     const u32 *exp_arr, unsigned int exp_len,
@ -107,7 +105,65 @@ __check_eq_u32_array(const char *srcfile, unsigned int line,
#define expect_eq_pbl(...)		__expect_eq(pbl, ##__VA_ARGS__)
#define expect_eq_u32_array(...)	__expect_eq(u32_array, ##__VA_ARGS__)

static void __init test_zero_fill_copy(void)
static void __init test_zero_clear(void)
{
	DECLARE_BITMAP(bmap, 1024);

	/* Known way to set all bits */
	memset(bmap, 0xff, 128);

	expect_eq_pbl("0-22", bmap, 23);
	expect_eq_pbl("0-1023", bmap, 1024);

	/* single-word bitmaps */
	bitmap_clear(bmap, 0, 9);
	expect_eq_pbl("9-1023", bmap, 1024);

	bitmap_zero(bmap, 35);
	expect_eq_pbl("64-1023", bmap, 1024);

	/* cross boundaries operations */
	bitmap_clear(bmap, 79, 19);
	expect_eq_pbl("64-78,98-1023", bmap, 1024);

	bitmap_zero(bmap, 115);
	expect_eq_pbl("128-1023", bmap, 1024);

	/* Zeroing entire area */
	bitmap_zero(bmap, 1024);
	expect_eq_pbl("", bmap, 1024);
}

static void __init test_fill_set(void)
{
	DECLARE_BITMAP(bmap, 1024);

	/* Known way to clear all bits */
	memset(bmap, 0x00, 128);

	expect_eq_pbl("", bmap, 23);
	expect_eq_pbl("", bmap, 1024);

	/* single-word bitmaps */
	bitmap_set(bmap, 0, 9);
	expect_eq_pbl("0-8", bmap, 1024);

	bitmap_fill(bmap, 35);
	expect_eq_pbl("0-63", bmap, 1024);

	/* cross boundaries operations */
	bitmap_set(bmap, 79, 19);
	expect_eq_pbl("0-63,79-97", bmap, 1024);

	bitmap_fill(bmap, 115);
	expect_eq_pbl("0-127", bmap, 1024);

	/* Zeroing entire area */
	bitmap_fill(bmap, 1024);
	expect_eq_pbl("0-1023", bmap, 1024);
}

static void __init test_copy(void)
{
	DECLARE_BITMAP(bmap1, 1024);
	DECLARE_BITMAP(bmap2, 1024);
@ -116,36 +172,20 @@ static void __init test_zero_fill_copy(void)
	bitmap_zero(bmap2, 1024);

	/* single-word bitmaps */
	expect_eq_pbl("", bmap1, 23);

	bitmap_fill(bmap1, 19);
	expect_eq_pbl("0-18", bmap1, 1024);

	bitmap_set(bmap1, 0, 19);
	bitmap_copy(bmap2, bmap1, 23);
	expect_eq_pbl("0-18", bmap2, 1024);

	bitmap_fill(bmap2, 23);
	expect_eq_pbl("0-22", bmap2, 1024);

	bitmap_set(bmap2, 0, 23);
	bitmap_copy(bmap2, bmap1, 23);
	expect_eq_pbl("0-18", bmap2, 1024);

	bitmap_zero(bmap1, 23);
	expect_eq_pbl("", bmap1, 1024);

	/* multi-word bitmaps */
	bitmap_zero(bmap1, 1024);
	expect_eq_pbl("", bmap1, 1024);

	bitmap_fill(bmap1, 109);
	expect_eq_pbl("0-108", bmap1, 1024);

	bitmap_set(bmap1, 0, 109);
	bitmap_copy(bmap2, bmap1, 1024);
	expect_eq_pbl("0-108", bmap2, 1024);

	bitmap_fill(bmap2, 1024);
	expect_eq_pbl("0-1023", bmap2, 1024);

	bitmap_copy(bmap2, bmap1, 1024);
	expect_eq_pbl("0-108", bmap2, 1024);

@ -160,9 +200,6 @@ static void __init test_zero_fill_copy(void)
	bitmap_fill(bmap2, 1024);
	bitmap_copy(bmap2, bmap1, 97); /* ... but aligned on word length */
	expect_eq_pbl("0-108,128-1023", bmap2, 1024);

	bitmap_zero(bmap2, 97); /* ... but 0-padded til word length */
	expect_eq_pbl("128-1023", bmap2, 1024);
}

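A side note on the expect_eq_pbl() strings used throughout: "%*pbl" prints a bitmap as a range list, so the expected strings read exactly like the output. Illustrative only:

	DECLARE_BITMAP(b, 16);

	bitmap_zero(b, 16);
	bitmap_set(b, 0, 3);
	bitmap_set(b, 9, 2);
	pr_info("%*pbl\n", 16, b);	/* prints: 0-2,9-10 */
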
#define PARSE_TIME 0x1
@ -255,171 +292,29 @@ static void __init test_bitmap_parselist(void)
	}
}

static void __init test_bitmap_u32_array_conversions(void)
static void __init test_bitmap_arr32(void)
{
	DECLARE_BITMAP(bmap1, 1024);
	DECLARE_BITMAP(bmap2, 1024);
	u32 exp_arr[32], arr[32];
	unsigned nbits;
	unsigned int nbits, next_bit, len = sizeof(exp) * 8;
	u32 arr[sizeof(exp) / 4];
	DECLARE_BITMAP(bmap2, len);

	for (nbits = 0 ; nbits < 257 ; ++nbits) {
		const unsigned int used_u32s = DIV_ROUND_UP(nbits, 32);
		unsigned int i, rv;
	memset(arr, 0xa5, sizeof(arr));

		bitmap_zero(bmap1, nbits);
		bitmap_set(bmap1, nbits, 1024 - nbits);	/* garbage */
	for (nbits = 0; nbits < len; ++nbits) {
		bitmap_to_arr32(arr, exp, nbits);
		bitmap_from_arr32(bmap2, arr, nbits);
		expect_eq_bitmap(bmap2, exp, nbits);

		memset(arr, 0xff, sizeof(arr));
		rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits);
		expect_eq_uint(nbits, rv);
		next_bit = find_next_bit(bmap2,
				round_up(nbits, BITS_PER_LONG), nbits);
		if (next_bit < round_up(nbits, BITS_PER_LONG))
			pr_err("bitmap_copy_arr32(nbits == %d:"
				" tail is not safely cleared: %d\n",
				nbits, next_bit);

		memset(exp_arr, 0xff, sizeof(exp_arr));
		memset(exp_arr, 0, used_u32s*sizeof(*exp_arr));
		expect_eq_u32_array(exp_arr, 32, arr, 32);

		bitmap_fill(bmap2, 1024);
		rv = bitmap_from_u32array(bmap2, nbits, arr, used_u32s);
		expect_eq_uint(nbits, rv);
		expect_eq_bitmap(bmap1, 1024, bmap2, 1024);

		for (i = 0 ; i < nbits ; ++i) {
			/*
			 * test conversion bitmap -> u32[]
			 */

			bitmap_zero(bmap1, 1024);
			__set_bit(i, bmap1);
			bitmap_set(bmap1, nbits, 1024 - nbits);	/* garbage */

			memset(arr, 0xff, sizeof(arr));
			rv = bitmap_to_u32array(arr, used_u32s, bmap1, nbits);
			expect_eq_uint(nbits, rv);

			/* 1st used u32 words contain expected bit set, the
			 * remaining words are left unchanged (0xff)
			 */
			memset(exp_arr, 0xff, sizeof(exp_arr));
			memset(exp_arr, 0, used_u32s*sizeof(*exp_arr));
			exp_arr[i/32] = (1U<<(i%32));
			expect_eq_u32_array(exp_arr, 32, arr, 32);


			/* same, with longer array to fill
			 */
			memset(arr, 0xff, sizeof(arr));
			rv = bitmap_to_u32array(arr, 32, bmap1, nbits);
			expect_eq_uint(nbits, rv);

			/* 1st used u32 words contain expected bit set, the
			 * remaining words are all 0s
			 */
			memset(exp_arr, 0, sizeof(exp_arr));
			exp_arr[i/32] = (1U<<(i%32));
			expect_eq_u32_array(exp_arr, 32, arr, 32);

			/*
			 * test conversion u32[] -> bitmap
			 */

			/* the 1st nbits of bmap2 are identical to
			 * bmap1, the remaining bits of bmap2 are left
			 * unchanged (all 1s)
			 */
			bitmap_fill(bmap2, 1024);
			rv = bitmap_from_u32array(bmap2, nbits,
						  exp_arr, used_u32s);
			expect_eq_uint(nbits, rv);

			expect_eq_bitmap(bmap1, 1024, bmap2, 1024);

			/* same, with more bits to fill
			 */
			memset(arr, 0xff, sizeof(arr));	/* garbage */
			memset(arr, 0, used_u32s*sizeof(u32));
			arr[i/32] = (1U<<(i%32));

			bitmap_fill(bmap2, 1024);
			rv = bitmap_from_u32array(bmap2, 1024, arr, used_u32s);
			expect_eq_uint(used_u32s*32, rv);

			/* the 1st nbits of bmap2 are identical to
			 * bmap1, the remaining bits of bmap2 are cleared
			 */
			bitmap_zero(bmap1, 1024);
			__set_bit(i, bmap1);
			expect_eq_bitmap(bmap1, 1024, bmap2, 1024);


			/*
			 * test short conversion bitmap -> u32[] (1
			 * word too short)
			 */
			if (used_u32s > 1) {
				bitmap_zero(bmap1, 1024);
				__set_bit(i, bmap1);
				bitmap_set(bmap1, nbits,
					1024 - nbits);	/* garbage */
				memset(arr, 0xff, sizeof(arr));

				rv = bitmap_to_u32array(arr, used_u32s - 1,
							bmap1, nbits);
				expect_eq_uint((used_u32s - 1)*32, rv);

				/* 1st used u32 words contain expected
				 * bit set, the remaining words are
				 * left unchanged (0xff)
				 */
				memset(exp_arr, 0xff, sizeof(exp_arr));
				memset(exp_arr, 0,
				       (used_u32s-1)*sizeof(*exp_arr));
				if ((i/32) < (used_u32s - 1))
					exp_arr[i/32] = (1U<<(i%32));
				expect_eq_u32_array(exp_arr, 32, arr, 32);
			}

			/*
			 * test short conversion u32[] -> bitmap (3
			 * bits too short)
			 */
			if (nbits > 3) {
				memset(arr, 0xff, sizeof(arr));	/* garbage */
				memset(arr, 0, used_u32s*sizeof(*arr));
				arr[i/32] = (1U<<(i%32));

				bitmap_zero(bmap1, 1024);
				rv = bitmap_from_u32array(bmap1, nbits - 3,
							  arr, used_u32s);
				expect_eq_uint(nbits - 3, rv);

				/* we are expecting the bit < nbits -
				 * 3 (none otherwise), and the rest of
				 * bmap1 unchanged (0-filled)
				 */
				bitmap_zero(bmap2, 1024);
				if (i < nbits - 3)
					__set_bit(i, bmap2);
				expect_eq_bitmap(bmap2, 1024, bmap1, 1024);

				/* do the same with bmap1 initially
				 * 1-filled
				 */

				bitmap_fill(bmap1, 1024);
				rv = bitmap_from_u32array(bmap1, nbits - 3,
							  arr, used_u32s);
				expect_eq_uint(nbits - 3, rv);

				/* we are expecting the bit < nbits -
				 * 3 (none otherwise), and the rest of
				 * bmap1 unchanged (1-filled)
				 */
				bitmap_zero(bmap2, 1024);
				if (i < nbits - 3)
					__set_bit(i, bmap2);
				bitmap_set(bmap2, nbits-3, 1024 - nbits + 3);
				expect_eq_bitmap(bmap2, 1024, bmap1, 1024);
			}
		}
		if (nbits < len - 32)
			expect_eq_uint(arr[DIV_ROUND_UP(nbits, 32)],
				       0xa5a5a5a5);
	}
}

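The memset(arr, 0xa5, ...) in the new test is a canary: any u32 the conversion was not supposed to touch must still read back as 0xa5a5a5a5. The same technique in isolation (write_first_n_words() is a placeholder for the code under test):

	u32 buf[8];

	memset(buf, 0xa5, sizeof(buf));
	write_first_n_words(buf, 3);		/* supposedly writes only buf[0..2] */
	WARN_ON(buf[3] != 0xa5a5a5a5);		/* everything past it must be untouched */
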
@ -453,8 +348,10 @@ static void noinline __init test_mem_optimisations(void)

static int __init test_bitmap_init(void)
{
	test_zero_fill_copy();
	test_bitmap_u32_array_conversions();
	test_zero_clear();
	test_fill_set();
	test_copy();
	test_bitmap_arr32();
	test_bitmap_parselist();
	test_mem_optimisations();


lib/test_kasan.c
@ -94,6 +94,37 @@ static noinline void __init kmalloc_pagealloc_oob_right(void)
	ptr[size] = 0;
	kfree(ptr);
}

static noinline void __init kmalloc_pagealloc_uaf(void)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	pr_info("kmalloc pagealloc allocation: use-after-free\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr);
	ptr[0] = 0;
}

static noinline void __init kmalloc_pagealloc_invalid_free(void)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	pr_info("kmalloc pagealloc allocation: invalid-free\n");
	ptr = kmalloc(size, GFP_KERNEL);
	if (!ptr) {
		pr_err("Allocation failed\n");
		return;
	}

	kfree(ptr + 1);
}
#endif

static noinline void __init kmalloc_large_oob_right(void)
@ -388,7 +419,7 @@ static noinline void __init kasan_stack_oob(void)
static noinline void __init ksize_unpoisons_memory(void)
{
	char *ptr;
	size_t size = 123, real_size = size;
	size_t size = 123, real_size;

	pr_info("ksize() unpoisons the whole allocated chunk\n");
	ptr = kmalloc(size, GFP_KERNEL);
@ -472,6 +503,74 @@ static noinline void __init use_after_scope_test(void)
	p[1023] = 1;
}

static noinline void __init kasan_alloca_oob_left(void)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array - 1;

	pr_info("out-of-bounds to left on alloca\n");
	*(volatile char *)p;
}

static noinline void __init kasan_alloca_oob_right(void)
{
	volatile int i = 10;
	char alloca_array[i];
	char *p = alloca_array + i;

	pr_info("out-of-bounds to right on alloca\n");
	*(volatile char *)p;
}

static noinline void __init kmem_cache_double_free(void)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	if (!cache) {
		pr_err("Cache allocation failed\n");
		return;
	}
	pr_info("double-free on heap object\n");
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		pr_err("Allocation failed\n");
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static noinline void __init kmem_cache_invalid_free(void)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	if (!cache) {
		pr_err("Cache allocation failed\n");
		return;
	}
	pr_info("invalid-free of heap object\n");
	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		pr_err("Allocation failed\n");
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p + 1);
	kmem_cache_destroy(cache);
}

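An aside on a recurring idiom in these tests: the bare `*(volatile char *)p;` statement forces a one-byte load the compiler cannot optimize away, so the bad access actually reaches KASAN's instrumentation. An equivalent sketch (not part of the diff):

	static inline char forced_read(const char *p)
	{
		return *(volatile const char *)p;	/* load is never elided */
	}
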
static int __init kmalloc_tests_init(void)
{
	/*
@ -485,6 +584,8 @@ static int __init kmalloc_tests_init(void)
	kmalloc_node_oob_right();
#ifdef CONFIG_SLUB
	kmalloc_pagealloc_oob_right();
	kmalloc_pagealloc_uaf();
	kmalloc_pagealloc_invalid_free();
#endif
	kmalloc_large_oob_right();
	kmalloc_oob_krealloc_more();
@ -502,9 +603,13 @@ static int __init kmalloc_tests_init(void)
	memcg_accounted_kmem_cache();
	kasan_stack_oob();
	kasan_global_oob();
	kasan_alloca_oob_left();
	kasan_alloca_oob_right();
	ksize_unpoisons_memory();
	copy_user_test();
	use_after_scope_test();
	kmem_cache_double_free();
	kmem_cache_invalid_free();

	kasan_restore_multi_shot(multishot);

lib/test_sort.c
@ -39,5 +39,11 @@ exit:
	return err;
}

static void __exit test_sort_exit(void)
{
}

module_init(test_sort_init);
module_exit(test_sort_exit);

MODULE_LICENSE("GPL");