Diffstat (limited to 'lib')
34 files changed, 781 insertions, 359 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index 61cce0686b53..6c1b8f184267 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -139,27 +139,22 @@ config TRACE_MMIO_ACCESS source "lib/crypto/Kconfig" config CRC_CCITT - tristate "CRC-CCITT functions" + tristate help - This option is provided for the case where no in-kernel-tree - modules require CRC-CCITT functions, but a module built outside - the kernel tree does. Such modules that use library CRC-CCITT - functions require M here. + The CRC-CCITT library functions. Select this if your module uses any + of the functions from <linux/crc-ccitt.h>. config CRC16 - tristate "CRC16 functions" + tristate help - This option is provided for the case where no in-kernel-tree - modules require CRC16 functions, but a module built outside - the kernel tree does. Such modules that use library CRC16 - functions require M here. + The CRC16 library functions. Select this if your module uses any of + the functions from <linux/crc16.h>. config CRC_T10DIF - tristate "CRC calculation for the T10 Data Integrity Field" + tristate help - This option is only needed if a module that's not in the - kernel tree needs to calculate CRC checks for use with the - SCSI data integrity subsystem. + The CRC-T10DIF library functions. Select this if your module uses + any of the functions from <linux/crc-t10dif.h>. config ARCH_HAS_CRC_T10DIF bool @@ -169,22 +164,17 @@ config CRC_T10DIF_ARCH default CRC_T10DIF if ARCH_HAS_CRC_T10DIF && CRC_OPTIMIZATIONS config CRC_ITU_T - tristate "CRC ITU-T V.41 functions" + tristate help - This option is provided for the case where no in-kernel-tree - modules require CRC ITU-T V.41 functions, but a module built outside - the kernel tree does. Such modules that use library CRC ITU-T V.41 - functions require M here. + The CRC-ITU-T library functions. Select this if your module uses + any of the functions from <linux/crc-itu-t.h>. config CRC32 - tristate "CRC32/CRC32c functions" - default y + tristate select BITREVERSE help - This option is provided for the case where no in-kernel-tree - modules require CRC32/CRC32c functions, but a module built outside - the kernel tree does. Such modules that use library CRC32/CRC32c - functions require M here. + The CRC32 library functions. Select this if your module uses any of + the functions from <linux/crc32.h> or <linux/crc32c.h>. config ARCH_HAS_CRC32 bool @@ -195,6 +185,9 @@ config CRC32_ARCH config CRC64 tristate + help + The CRC64 library functions. Select this if your module uses any of + the functions from <linux/crc64.h>. config ARCH_HAS_CRC64 bool @@ -205,19 +198,21 @@ config CRC64_ARCH config CRC4 tristate + help + The CRC4 library functions. Select this if your module uses any of + the functions from <linux/crc4.h>. config CRC7 tristate - -config LIBCRC32C - tristate - select CRC32 help - This option just selects CRC32 and is provided for compatibility - purposes until the users are updated to select CRC32 directly. + The CRC7 library functions. Select this if your module uses any of + the functions from <linux/crc7.h>. config CRC8 tristate + help + The CRC8 library functions. Select this if your module uses any of + the functions from <linux/crc8.h>. config CRC_OPTIMIZATIONS bool "Enable optimized CRC implementations" if EXPERT diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index df9587aa5c5e..f9051ab610d5 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -335,12 +335,12 @@ config DEBUG_INFO_COMPRESSED_ZLIB Compress the debug information using zlib. 
Requires GCC 5.0+ or Clang 5.0+, binutils 2.26+, and zlib. - Users of dpkg-deb via scripts/package/builddeb may find an increase in + Users of dpkg-deb via debian/rules may find an increase in size of their debug .deb packages with this config set, due to the debug info being compressed with zlib, then the object files being recompressed with a different compression scheme. But this is still - preferable to setting $KDEB_COMPRESS to "none" which would be even - larger. + preferable to setting KDEB_COMPRESS or DPKG_DEB_COMPRESSOR_TYPE to + "none" which would be even larger. config DEBUG_INFO_COMPRESSED_ZSTD bool "Compress debugging information with zstd" @@ -473,7 +473,6 @@ config READABLE_ASM config HEADERS_INSTALL bool "Install uapi headers to usr/include" - depends on !UML help This option will install uapi headers (headers exported to user-space) into the usr/include directory for use during the kernel build. @@ -3291,7 +3290,7 @@ config GCD_KUNIT_TEST config PRIME_NUMBERS_KUNIT_TEST tristate "Prime number generator test" if !KUNIT_ALL_TESTS depends on KUNIT - select PRIME_NUMBERS + depends on PRIME_NUMBERS default KUNIT_ALL_TESTS help This option enables the KUnit test suite for the {is,next}_prime_number diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index 4216b3a4ff21..f6ea0c5b5da3 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -118,7 +118,6 @@ config UBSAN_UNREACHABLE config UBSAN_INTEGER_WRAP bool "Perform checking for integer arithmetic wrap-around" - default UBSAN depends on !COMPILE_TEST depends on $(cc-option,-fsanitize-undefined-ignore-overflow-pattern=all) depends on $(cc-option,-fsanitize=signed-integer-overflow) diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c index 1d893e313614..c7f602fa7b23 100644 --- a/lib/alloc_tag.c +++ b/lib/alloc_tag.c @@ -350,18 +350,28 @@ static bool needs_section_mem(struct module *mod, unsigned long size) return size >= sizeof(struct alloc_tag); } -static struct alloc_tag *find_used_tag(struct alloc_tag *from, struct alloc_tag *to) +static bool clean_unused_counters(struct alloc_tag *start_tag, + struct alloc_tag *end_tag) { - while (from <= to) { + struct alloc_tag *tag; + bool ret = true; + + for (tag = start_tag; tag <= end_tag; tag++) { struct alloc_tag_counters counter; - counter = alloc_tag_read(from); - if (counter.bytes) - return from; - from++; + if (!tag->counters) + continue; + + counter = alloc_tag_read(tag); + if (!counter.bytes) { + free_percpu(tag->counters); + tag->counters = NULL; + } else { + ret = false; + } } - return NULL; + return ret; } /* Called with mod_area_mt locked */ @@ -371,12 +381,16 @@ static void clean_unused_module_areas_locked(void) struct module *val; mas_for_each(&mas, val, module_tags.size) { + struct alloc_tag *start_tag; + struct alloc_tag *end_tag; + if (val != &unloaded_mod) continue; /* Release area if all tags are unused */ - if (!find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index), - (struct alloc_tag *)(module_tags.start_addr + mas.last))) + start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index); + end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last); + if (clean_unused_counters(start_tag, end_tag)) mas_erase(&mas); } } @@ -422,11 +436,20 @@ static int vm_module_tags_populate(void) unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN); unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN); unsigned long more_pages; - unsigned long nr; + unsigned long nr = 0; more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT; - nr = 
alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN, - NUMA_NO_NODE, more_pages, next_page); + while (nr < more_pages) { + unsigned long allocated; + + allocated = alloc_pages_bulk_node(GFP_KERNEL | __GFP_NOWARN, + NUMA_NO_NODE, more_pages - nr, next_page + nr); + + if (!allocated) + break; + nr += allocated; + } + if (nr < more_pages || vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL, next_page, PAGE_SHIFT) < 0) { @@ -552,7 +575,8 @@ unlock: static void release_module_tags(struct module *mod, bool used) { MA_STATE(mas, &mod_area_mt, module_tags.size, module_tags.size); - struct alloc_tag *tag; + struct alloc_tag *start_tag; + struct alloc_tag *end_tag; struct module *val; mas_lock(&mas); @@ -566,15 +590,22 @@ static void release_module_tags(struct module *mod, bool used) if (!used) goto release_area; - /* Find out if the area is used */ - tag = find_used_tag((struct alloc_tag *)(module_tags.start_addr + mas.index), - (struct alloc_tag *)(module_tags.start_addr + mas.last)); - if (tag) { - struct alloc_tag_counters counter = alloc_tag_read(tag); + start_tag = (struct alloc_tag *)(module_tags.start_addr + mas.index); + end_tag = (struct alloc_tag *)(module_tags.start_addr + mas.last); + if (!clean_unused_counters(start_tag, end_tag)) { + struct alloc_tag *tag; + + for (tag = start_tag; tag <= end_tag; tag++) { + struct alloc_tag_counters counter; + + if (!tag->counters) + continue; - pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n", - tag->ct.filename, tag->ct.lineno, tag->ct.modname, - tag->ct.function, counter.bytes); + counter = alloc_tag_read(tag); + pr_info("%s:%u module %s func:%s has %llu allocated at module unload\n", + tag->ct.filename, tag->ct.lineno, tag->ct.modname, + tag->ct.function, counter.bytes); + } } else { used = false; } @@ -587,6 +618,34 @@ out: mas_unlock(&mas); } +static void load_module(struct module *mod, struct codetag *start, struct codetag *stop) +{ + /* Allocate module alloc_tag percpu counters */ + struct alloc_tag *start_tag; + struct alloc_tag *stop_tag; + struct alloc_tag *tag; + + if (!mod) + return; + + start_tag = ct_to_alloc_tag(start); + stop_tag = ct_to_alloc_tag(stop); + for (tag = start_tag; tag < stop_tag; tag++) { + WARN_ON(tag->counters); + tag->counters = alloc_percpu(struct alloc_tag_counters); + if (!tag->counters) { + while (--tag >= start_tag) { + free_percpu(tag->counters); + tag->counters = NULL; + } + shutdown_mem_profiling(true); + pr_err("Failed to allocate memory for allocation tag percpu counters in the module %s. 
Memory allocation profiling is disabled!\n", + mod->name); + break; + } + } +} + static void replace_module(struct module *mod, struct module *new_mod) { MA_STATE(mas, &mod_area_mt, 0, module_tags.size); @@ -748,6 +807,7 @@ static int __init alloc_tag_init(void) .needs_section_mem = needs_section_mem, .alloc_section_mem = reserve_module_tags, .free_section_mem = release_module_tags, + .module_load = load_module, .module_replaced = replace_module, #endif }; diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c index 13da529e2e72..5738ae286b41 100644 --- a/lib/asn1_decoder.c +++ b/lib/asn1_decoder.c @@ -518,4 +518,5 @@ error: } EXPORT_SYMBOL_GPL(asn1_ber_decoder); +MODULE_DESCRIPTION("Decoder for ASN.1 BER/DER/CER encoded bytestream"); MODULE_LICENSE("GPL"); diff --git a/lib/codetag.c b/lib/codetag.c index 42aadd6c1454..de332e98d6f5 100644 --- a/lib/codetag.c +++ b/lib/codetag.c @@ -194,7 +194,7 @@ static int codetag_module_init(struct codetag_type *cttype, struct module *mod) if (err >= 0) { cttype->count += range_size(cttype, &range); if (cttype->desc.module_load) - cttype->desc.module_load(cttype, cmod); + cttype->desc.module_load(mod, range.start, range.stop); } up_write(&cttype->mod_lock); @@ -333,7 +333,8 @@ void codetag_unload_module(struct module *mod) } if (found) { if (cttype->desc.module_unload) - cttype->desc.module_unload(cttype, cmod); + cttype->desc.module_unload(cmod->mod, + cmod->range.start, cmod->range.stop); cttype->count -= range_size(cttype, &cmod->range); idr_remove(&cttype->mod_idr, mod_id); diff --git a/lib/crc16.c b/lib/crc16.c index 5c3a803c01e0..9c71eda9bf4b 100644 --- a/lib/crc16.c +++ b/lib/crc16.c @@ -8,7 +8,7 @@ #include <linux/crc16.h> /** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */ -u16 const crc16_table[256] = { +static const u16 crc16_table[256] = { 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440, 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, @@ -42,20 +42,19 @@ u16 const crc16_table[256] = { 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 }; -EXPORT_SYMBOL(crc16_table); /** * crc16 - compute the CRC-16 for the data buffer * @crc: previous CRC value - * @buffer: data pointer + * @p: data pointer * @len: number of bytes in the buffer * * Returns the updated CRC value. */ -u16 crc16(u16 crc, u8 const *buffer, size_t len) +u16 crc16(u16 crc, const u8 *p, size_t len) { while (len--) - crc = crc16_byte(crc, *buffer++); + crc = (crc >> 8) ^ crc16_table[(crc & 0xff) ^ *p++]; return crc; } EXPORT_SYMBOL(crc16); diff --git a/lib/crc32.c b/lib/crc32.c index fddd424ff224..e690026f44f7 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -1,3 +1,4 @@ +// SPDX-License-Identifier: GPL-2.0-only /* * Aug 8, 2011 Bob Pearson with help from Joakim Tjernlund and George Spelvin * cleaned up code to current version of sparse and added the slicing-by-8 @@ -19,9 +20,6 @@ * drivers/net/smc9194.c uses seed ~0, doesn't xor with ~0. * fs/jffs2 uses seed 0, doesn't xor with ~0. * fs/partitions/efi.c uses seed ~0, xor's with ~0. - * - * This source code is licensed under the GNU General Public License, - * Version 2. See the file COPYING for more details. 
*/ /* see: Documentation/staging/crc32.rst for a description of algorithms */ diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig index 798972b29b68..1ec1466108cc 100644 --- a/lib/crypto/Kconfig +++ b/lib/crypto/Kconfig @@ -50,22 +50,16 @@ config CRYPTO_ARCH_HAVE_LIB_CHACHA config CRYPTO_LIB_CHACHA_GENERIC tristate + default CRYPTO_LIB_CHACHA if !CRYPTO_ARCH_HAVE_LIB_CHACHA select CRYPTO_LIB_UTILS help - This symbol can be depended upon by arch implementations of the - ChaCha library interface that require the generic code as a - fallback, e.g., for SIMD implementations. If no arch specific - implementation is enabled, this implementation serves the users - of CRYPTO_LIB_CHACHA. - -config CRYPTO_LIB_CHACHA_INTERNAL - tristate - select CRYPTO_LIB_CHACHA_GENERIC if CRYPTO_ARCH_HAVE_LIB_CHACHA=n + This symbol can be selected by arch implementations of the ChaCha + library interface that require the generic code as a fallback, e.g., + for SIMD implementations. If no arch specific implementation is + enabled, this implementation serves the users of CRYPTO_LIB_CHACHA. config CRYPTO_LIB_CHACHA tristate - select CRYPTO - select CRYPTO_LIB_CHACHA_INTERNAL help Enable the ChaCha library interface. This interface may be fulfilled by either the generic implementation or an arch-specific one, if one @@ -120,21 +114,15 @@ config CRYPTO_ARCH_HAVE_LIB_POLY1305 config CRYPTO_LIB_POLY1305_GENERIC tristate + default CRYPTO_LIB_POLY1305 if !CRYPTO_ARCH_HAVE_LIB_POLY1305 help - This symbol can be depended upon by arch implementations of the - Poly1305 library interface that require the generic code as a - fallback, e.g., for SIMD implementations. If no arch specific - implementation is enabled, this implementation serves the users - of CRYPTO_LIB_POLY1305. - -config CRYPTO_LIB_POLY1305_INTERNAL - tristate - select CRYPTO_LIB_POLY1305_GENERIC if CRYPTO_ARCH_HAVE_LIB_POLY1305=n + This symbol can be selected by arch implementations of the Poly1305 + library interface that require the generic code as a fallback, e.g., + for SIMD implementations. If no arch specific implementation is + enabled, this implementation serves the users of CRYPTO_LIB_POLY1305. config CRYPTO_LIB_POLY1305 tristate - select CRYPTO - select CRYPTO_LIB_POLY1305_INTERNAL help Enable the Poly1305 library interface. This interface may be fulfilled by either the generic implementation or an arch-specific one, if one @@ -151,5 +139,62 @@ config CRYPTO_LIB_SHA1 config CRYPTO_LIB_SHA256 tristate + help + Enable the SHA-256 library interface. This interface may be fulfilled + by either the generic implementation or an arch-specific one, if one + is available and enabled. + +config CRYPTO_ARCH_HAVE_LIB_SHA256 + bool + help + Declares whether the architecture provides an arch-specific + accelerated implementation of the SHA-256 library interface. + +config CRYPTO_ARCH_HAVE_LIB_SHA256_SIMD + bool + help + Declares whether the architecture provides an arch-specific + accelerated implementation of the SHA-256 library interface + that is SIMD-based and therefore not usable in hardirq + context. + +config CRYPTO_LIB_SHA256_GENERIC + tristate + default CRYPTO_LIB_SHA256 if !CRYPTO_ARCH_HAVE_LIB_SHA256 + help + This symbol can be selected by arch implementations of the SHA-256 + library interface that require the generic code as a fallback, e.g., + for SIMD implementations. If no arch specific implementation is + enabled, this implementation serves the users of CRYPTO_LIB_SHA256. 
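The ChaCha, Poly1305 and SHA-256 entries above all converge on one fallback scheme: the CRYPTO_LIB_*_INTERNAL indirection symbols go away, and the generic code is built via "default CRYPTO_LIB_<ALG> if !CRYPTO_ARCH_HAVE_LIB_<ALG>", i.e. exactly when no arch-specific implementation is enabled. The library entry points can then pick an implementation at compile time; a minimal sketch of that pattern, following the poly1305.c hunk further down in this diff:

	static inline void poly1305_blocks(struct poly1305_block_state *state,
					   const u8 *src, unsigned int len)
	{
		/* IS_ENABLED() folds to a constant; the dead branch is discarded. */
		if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
			poly1305_blocks_arch(state, src, len, 1);
		else
			poly1305_blocks_generic(state, src, len, 1);
	}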
+ +config CRYPTO_LIB_SM3 + tristate + +if !KMSAN # avoid false positives from assembly +if ARM +source "arch/arm/lib/crypto/Kconfig" +endif +if ARM64 +source "arch/arm64/lib/crypto/Kconfig" +endif +if MIPS +source "arch/mips/lib/crypto/Kconfig" +endif +if PPC +source "arch/powerpc/lib/crypto/Kconfig" +endif +if RISCV +source "arch/riscv/lib/crypto/Kconfig" +endif +if S390 +source "arch/s390/lib/crypto/Kconfig" +endif +if SPARC +source "arch/sparc/lib/crypto/Kconfig" +endif +if X86 +source "arch/x86/lib/crypto/Kconfig" +endif +endif endmenu diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile index 01fac1cd05a1..3e79283b617d 100644 --- a/lib/crypto/Makefile +++ b/lib/crypto/Makefile @@ -25,9 +25,11 @@ obj-$(CONFIG_CRYPTO_LIB_GF128MUL) += gf128mul.o obj-y += libblake2s.o libblake2s-y := blake2s.o libblake2s-$(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) += blake2s-generic.o +libblake2s-$(CONFIG_CRYPTO_SELFTESTS) += blake2s-selftest.o obj-$(CONFIG_CRYPTO_LIB_CHACHA20POLY1305) += libchacha20poly1305.o libchacha20poly1305-y += chacha20poly1305.o +libchacha20poly1305-$(CONFIG_CRYPTO_SELFTESTS) += chacha20poly1305-selftest.o obj-$(CONFIG_CRYPTO_LIB_CURVE25519_GENERIC) += libcurve25519-generic.o libcurve25519-generic-y := curve25519-fiat32.o @@ -36,27 +38,31 @@ libcurve25519-generic-y += curve25519-generic.o obj-$(CONFIG_CRYPTO_LIB_CURVE25519) += libcurve25519.o libcurve25519-y += curve25519.o +libcurve25519-$(CONFIG_CRYPTO_SELFTESTS) += curve25519-selftest.o obj-$(CONFIG_CRYPTO_LIB_DES) += libdes.o libdes-y := des.o -obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305.o -libpoly1305-y := poly1305-donna32.o -libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o +obj-$(CONFIG_CRYPTO_LIB_POLY1305) += libpoly1305.o libpoly1305-y += poly1305.o +obj-$(CONFIG_CRYPTO_LIB_POLY1305_GENERIC) += libpoly1305-generic.o +libpoly1305-generic-y := poly1305-donna32.o +libpoly1305-generic-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o +libpoly1305-generic-y += poly1305-generic.o + obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o libsha1-y := sha1.o obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o libsha256-y := sha256.o -ifneq ($(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS),y) -libblake2s-y += blake2s-selftest.o -libchacha20poly1305-y += chacha20poly1305-selftest.o -libcurve25519-y += curve25519-selftest.o -endif +obj-$(CONFIG_CRYPTO_LIB_SHA256_GENERIC) += libsha256-generic.o +libsha256-generic-y := sha256-generic.o obj-$(CONFIG_MPILIB) += mpi/ -obj-$(CONFIG_CRYPTO_MANAGER_EXTRA_TESTS) += simd.o +obj-$(CONFIG_CRYPTO_SELFTESTS) += simd.o + +obj-$(CONFIG_CRYPTO_LIB_SM3) += libsm3.o +libsm3-y := sm3.o diff --git a/lib/crypto/aescfb.c b/lib/crypto/aescfb.c index 749dc1258a44..437613265e14 100644 --- a/lib/crypto/aescfb.c +++ b/lib/crypto/aescfb.c @@ -99,7 +99,7 @@ MODULE_DESCRIPTION("Generic AES-CFB library"); MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); MODULE_LICENSE("GPL"); -#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +#ifdef CONFIG_CRYPTO_SELFTESTS /* * Test code below. Vectors taken from crypto/testmgr.h diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c index 902e49410aaf..277824d6b4af 100644 --- a/lib/crypto/aesgcm.c +++ b/lib/crypto/aesgcm.c @@ -199,7 +199,7 @@ MODULE_DESCRIPTION("Generic AES-GCM library"); MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>"); MODULE_LICENSE("GPL"); -#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS +#ifdef CONFIG_CRYPTO_SELFTESTS /* * Test code below. 
Vectors taken from crypto/testmgr.h diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index 71a316552cc5..b0f9a678300b 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -60,7 +60,7 @@ EXPORT_SYMBOL(blake2s_final); static int __init blake2s_mod_init(void) { - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && + if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) && WARN_ON(!blake2s_selftest())) return -ENODEV; return 0; diff --git a/lib/crypto/chacha.c b/lib/crypto/chacha.c index 3cdda3b5ee06..ced87dd31a97 100644 --- a/lib/crypto/chacha.c +++ b/lib/crypto/chacha.c @@ -13,8 +13,9 @@ #include <linux/unaligned.h> #include <crypto/chacha.h> -static void chacha_permute(u32 *x, int nrounds) +static void chacha_permute(struct chacha_state *state, int nrounds) { + u32 *x = state->x; int i; /* whitelist the allowed round counts */ @@ -65,34 +66,34 @@ static void chacha_permute(u32 *x, int nrounds) /** * chacha_block_generic - generate one keystream block and increment block counter - * @state: input state matrix (16 32-bit words) - * @stream: output keystream block (64 bytes) + * @state: input state matrix + * @out: output keystream block * @nrounds: number of rounds (20 or 12; 20 is recommended) * * This is the ChaCha core, a function from 64-byte strings to 64-byte strings. * The caller has already converted the endianness of the input. This function * also handles incrementing the block counter in the input matrix. */ -void chacha_block_generic(u32 *state, u8 *stream, int nrounds) +void chacha_block_generic(struct chacha_state *state, + u8 out[CHACHA_BLOCK_SIZE], int nrounds) { - u32 x[16]; + struct chacha_state permuted_state = *state; int i; - memcpy(x, state, 64); + chacha_permute(&permuted_state, nrounds); - chacha_permute(x, nrounds); + for (i = 0; i < ARRAY_SIZE(state->x); i++) + put_unaligned_le32(permuted_state.x[i] + state->x[i], + &out[i * sizeof(u32)]); - for (i = 0; i < ARRAY_SIZE(x); i++) - put_unaligned_le32(x[i] + state[i], &stream[i * sizeof(u32)]); - - state[12]++; + state->x[12]++; } EXPORT_SYMBOL(chacha_block_generic); /** * hchacha_block_generic - abbreviated ChaCha core, for XChaCha - * @state: input state matrix (16 32-bit words) - * @stream: output (8 32-bit words) + * @state: input state matrix + * @out: the output words * @nrounds: number of rounds (20 or 12; 20 is recommended) * * HChaCha is the ChaCha equivalent of HSalsa and is an intermediate step @@ -100,15 +101,14 @@ EXPORT_SYMBOL(chacha_block_generic); * skips the final addition of the initial state, and outputs only certain words * of the state. It should not be used for streaming directly. 
*/ -void hchacha_block_generic(const u32 *state, u32 *stream, int nrounds) +void hchacha_block_generic(const struct chacha_state *state, + u32 out[HCHACHA_OUT_WORDS], int nrounds) { - u32 x[16]; - - memcpy(x, state, 64); + struct chacha_state permuted_state = *state; - chacha_permute(x, nrounds); + chacha_permute(&permuted_state, nrounds); - memcpy(&stream[0], &x[0], 16); - memcpy(&stream[4], &x[12], 16); + memcpy(&out[0], &permuted_state.x[0], 16); + memcpy(&out[4], &permuted_state.x[12], 16); } EXPORT_SYMBOL(hchacha_block_generic); diff --git a/lib/crypto/chacha20poly1305-selftest.c b/lib/crypto/chacha20poly1305-selftest.c index 2ea61c28be4f..e4c85bc5a6d7 100644 --- a/lib/crypto/chacha20poly1305-selftest.c +++ b/lib/crypto/chacha20poly1305-selftest.c @@ -8832,7 +8832,7 @@ chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len, { const u8 *pad0 = page_address(ZERO_PAGE(0)); struct poly1305_desc_ctx poly1305_state; - u32 chacha20_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha20_state; union { u8 block0[POLY1305_KEY_SIZE]; __le64 lens[2]; @@ -8844,12 +8844,12 @@ chacha20poly1305_encrypt_bignonce(u8 *dst, const u8 *src, const size_t src_len, memcpy(&bottom_row[4], nonce, 12); for (i = 0; i < 8; ++i) le_key[i] = get_unaligned_le32(key + sizeof(le_key[i]) * i); - chacha_init(chacha20_state, le_key, bottom_row); - chacha20_crypt(chacha20_state, b.block0, b.block0, sizeof(b.block0)); + chacha_init(&chacha20_state, le_key, bottom_row); + chacha20_crypt(&chacha20_state, b.block0, b.block0, sizeof(b.block0)); poly1305_init(&poly1305_state, b.block0); poly1305_update(&poly1305_state, ad, ad_len); poly1305_update(&poly1305_state, pad0, (0x10 - ad_len) & 0xf); - chacha20_crypt(chacha20_state, dst, src, src_len); + chacha20_crypt(&chacha20_state, dst, src, src_len); poly1305_update(&poly1305_state, dst, src_len); poly1305_update(&poly1305_state, pad0, (0x10 - src_len) & 0xf); b.lens[0] = cpu_to_le64(ad_len); diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c index 9cfa886f1f89..e29eed49a5a1 100644 --- a/lib/crypto/chacha20poly1305.c +++ b/lib/crypto/chacha20poly1305.c @@ -18,8 +18,6 @@ #include <linux/mm.h> #include <linux/module.h> -#define CHACHA_KEY_WORDS (CHACHA_KEY_SIZE / sizeof(u32)) - static void chacha_load_key(u32 *k, const u8 *in) { k[0] = get_unaligned_le32(in); @@ -32,7 +30,8 @@ static void chacha_load_key(u32 *k, const u8 *in) k[7] = get_unaligned_le32(in + 28); } -static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce) +static void xchacha_init(struct chacha_state *chacha_state, + const u8 *key, const u8 *nonce) { u32 k[CHACHA_KEY_WORDS]; u8 iv[CHACHA_IV_SIZE]; @@ -54,7 +53,8 @@ static void xchacha_init(u32 *chacha_state, const u8 *key, const u8 *nonce) static void __chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, - const u8 *ad, const size_t ad_len, u32 *chacha_state) + const u8 *ad, const size_t ad_len, + struct chacha_state *chacha_state) { const u8 *pad0 = page_address(ZERO_PAGE(0)); struct poly1305_desc_ctx poly1305_state; @@ -82,7 +82,7 @@ __chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, poly1305_final(&poly1305_state, dst + src_len); - memzero_explicit(chacha_state, CHACHA_STATE_WORDS * sizeof(u32)); + chacha_zeroize_state(chacha_state); memzero_explicit(&b, sizeof(b)); } @@ -91,7 +91,7 @@ void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, const u64 nonce, const u8 key[CHACHA20POLY1305_KEY_SIZE]) { - u32 
chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u32 k[CHACHA_KEY_WORDS]; __le64 iv[2]; @@ -100,8 +100,9 @@ void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, iv[0] = 0; iv[1] = cpu_to_le64(nonce); - chacha_init(chacha_state, k, (u8 *)iv); - __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state); + chacha_init(&chacha_state, k, (u8 *)iv); + __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, + &chacha_state); memzero_explicit(iv, sizeof(iv)); memzero_explicit(k, sizeof(k)); @@ -113,16 +114,18 @@ void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], const u8 key[CHACHA20POLY1305_KEY_SIZE]) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; - xchacha_init(chacha_state, key, nonce); - __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, chacha_state); + xchacha_init(&chacha_state, key, nonce); + __chacha20poly1305_encrypt(dst, src, src_len, ad, ad_len, + &chacha_state); } EXPORT_SYMBOL(xchacha20poly1305_encrypt); static bool __chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, - const u8 *ad, const size_t ad_len, u32 *chacha_state) + const u8 *ad, const size_t ad_len, + struct chacha_state *chacha_state) { const u8 *pad0 = page_address(ZERO_PAGE(0)); struct poly1305_desc_ctx poly1305_state; @@ -169,7 +172,7 @@ bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, const u64 nonce, const u8 key[CHACHA20POLY1305_KEY_SIZE]) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; u32 k[CHACHA_KEY_WORDS]; __le64 iv[2]; bool ret; @@ -179,11 +182,11 @@ bool chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, iv[0] = 0; iv[1] = cpu_to_le64(nonce); - chacha_init(chacha_state, k, (u8 *)iv); + chacha_init(&chacha_state, k, (u8 *)iv); ret = __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, - chacha_state); + &chacha_state); - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); memzero_explicit(iv, sizeof(iv)); memzero_explicit(k, sizeof(k)); return ret; @@ -195,11 +198,11 @@ bool xchacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE], const u8 key[CHACHA20POLY1305_KEY_SIZE]) { - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; - xchacha_init(chacha_state, key, nonce); + xchacha_init(&chacha_state, key, nonce); return __chacha20poly1305_decrypt(dst, src, src_len, ad, ad_len, - chacha_state); + &chacha_state); } EXPORT_SYMBOL(xchacha20poly1305_decrypt); @@ -213,7 +216,7 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, { const u8 *pad0 = page_address(ZERO_PAGE(0)); struct poly1305_desc_ctx poly1305_state; - u32 chacha_state[CHACHA_STATE_WORDS]; + struct chacha_state chacha_state; struct sg_mapping_iter miter; size_t partial = 0; unsigned int flags; @@ -240,8 +243,8 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, b.iv[0] = 0; b.iv[1] = cpu_to_le64(nonce); - chacha_init(chacha_state, b.k, (u8 *)b.iv); - chacha20_crypt(chacha_state, b.block0, pad0, sizeof(b.block0)); + chacha_init(&chacha_state, b.k, (u8 *)b.iv); + chacha20_crypt(&chacha_state, b.block0, pad0, sizeof(b.block0)); poly1305_init(&poly1305_state, b.block0); if (unlikely(ad_len)) { @@ -276,13 +279,13 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, if (unlikely(length < sl)) l &= ~(CHACHA_BLOCK_SIZE - 1); - 
chacha20_crypt(chacha_state, addr, addr, l); + chacha20_crypt(&chacha_state, addr, addr, l); addr += l; length -= l; } if (unlikely(length > 0)) { - chacha20_crypt(chacha_state, b.chacha_stream, pad0, + chacha20_crypt(&chacha_state, b.chacha_stream, pad0, CHACHA_BLOCK_SIZE); crypto_xor(addr, b.chacha_stream, length); partial = length; @@ -323,7 +326,7 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src, !crypto_memneq(b.mac[0], b.mac[1], POLY1305_DIGEST_SIZE); } - memzero_explicit(chacha_state, sizeof(chacha_state)); + chacha_zeroize_state(&chacha_state); memzero_explicit(&b, sizeof(b)); return ret; @@ -355,7 +358,7 @@ EXPORT_SYMBOL(chacha20poly1305_decrypt_sg_inplace); static int __init chacha20poly1305_init(void) { - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && + if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) && WARN_ON(!chacha20poly1305_selftest())) return -ENODEV; return 0; diff --git a/lib/crypto/curve25519.c b/lib/crypto/curve25519.c index 064b352c6907..6850b76a80c9 100644 --- a/lib/crypto/curve25519.c +++ b/lib/crypto/curve25519.c @@ -15,7 +15,7 @@ static int __init curve25519_init(void) { - if (!IS_ENABLED(CONFIG_CRYPTO_MANAGER_DISABLE_TESTS) && + if (IS_ENABLED(CONFIG_CRYPTO_SELFTESTS) && WARN_ON(!curve25519_selftest())) return -ENODEV; return 0; diff --git a/lib/crypto/libchacha.c b/lib/crypto/libchacha.c index cc1be0496eb9..ebcca381e248 100644 --- a/lib/crypto/libchacha.c +++ b/lib/crypto/libchacha.c @@ -12,7 +12,7 @@ #include <crypto/algapi.h> // for crypto_xor_cpy #include <crypto/chacha.h> -void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src, +void chacha_crypt_generic(struct chacha_state *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds) { /* aligned to potentially speed up crypto_xor() */ diff --git a/lib/crypto/poly1305-generic.c b/lib/crypto/poly1305-generic.c new file mode 100644 index 000000000000..a73f700fa1fb --- /dev/null +++ b/lib/crypto/poly1305-generic.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Poly1305 authenticator algorithm, RFC7539 + * + * Copyright (C) 2015 Martin Willi + * + * Based on public domain code by Andrew Moon and Daniel J. Bernstein. + */ + +#include <crypto/internal/poly1305.h> +#include <linux/kernel.h> +#include <linux/module.h> + +void poly1305_block_init_generic(struct poly1305_block_state *desc, + const u8 raw_key[POLY1305_BLOCK_SIZE]) +{ + poly1305_core_init(&desc->h); + poly1305_core_setkey(&desc->core_r, raw_key); +} +EXPORT_SYMBOL_GPL(poly1305_block_init_generic); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); +MODULE_DESCRIPTION("Poly1305 algorithm (generic implementation)"); diff --git a/lib/crypto/poly1305.c b/lib/crypto/poly1305.c index 6e80214ebad8..5f2f2af3b59f 100644 --- a/lib/crypto/poly1305.c +++ b/lib/crypto/poly1305.c @@ -7,72 +7,67 @@ * Based on public domain code by Andrew Moon and Daniel J. Bernstein. 
*/ +#include <crypto/internal/blockhash.h> #include <crypto/internal/poly1305.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/string.h> #include <linux/unaligned.h> -void poly1305_init_generic(struct poly1305_desc_ctx *desc, - const u8 key[POLY1305_KEY_SIZE]) +void poly1305_init(struct poly1305_desc_ctx *desc, + const u8 key[POLY1305_KEY_SIZE]) { - poly1305_core_setkey(&desc->core_r, key); desc->s[0] = get_unaligned_le32(key + 16); desc->s[1] = get_unaligned_le32(key + 20); desc->s[2] = get_unaligned_le32(key + 24); desc->s[3] = get_unaligned_le32(key + 28); - poly1305_core_init(&desc->h); desc->buflen = 0; - desc->sset = true; - desc->rset = 2; + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_block_init_arch(&desc->state, key); + else + poly1305_block_init_generic(&desc->state, key); } -EXPORT_SYMBOL_GPL(poly1305_init_generic); +EXPORT_SYMBOL(poly1305_init); -void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src, - unsigned int nbytes) +static inline void poly1305_blocks(struct poly1305_block_state *state, + const u8 *src, unsigned int len) { - unsigned int bytes; - - if (unlikely(desc->buflen)) { - bytes = min(nbytes, POLY1305_BLOCK_SIZE - desc->buflen); - memcpy(desc->buf + desc->buflen, src, bytes); - src += bytes; - nbytes -= bytes; - desc->buflen += bytes; - - if (desc->buflen == POLY1305_BLOCK_SIZE) { - poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, - 1, 1); - desc->buflen = 0; - } - } - - if (likely(nbytes >= POLY1305_BLOCK_SIZE)) { - poly1305_core_blocks(&desc->h, &desc->core_r, src, - nbytes / POLY1305_BLOCK_SIZE, 1); - src += nbytes - (nbytes % POLY1305_BLOCK_SIZE); - nbytes %= POLY1305_BLOCK_SIZE; - } + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_blocks_arch(state, src, len, 1); + else + poly1305_blocks_generic(state, src, len, 1); +} - if (unlikely(nbytes)) { - desc->buflen = nbytes; - memcpy(desc->buf, src, nbytes); - } +void poly1305_update(struct poly1305_desc_ctx *desc, + const u8 *src, unsigned int nbytes) +{ + desc->buflen = BLOCK_HASH_UPDATE(poly1305_blocks, &desc->state, + src, nbytes, POLY1305_BLOCK_SIZE, + desc->buf, desc->buflen); } -EXPORT_SYMBOL_GPL(poly1305_update_generic); +EXPORT_SYMBOL(poly1305_update); -void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *dst) +void poly1305_final(struct poly1305_desc_ctx *desc, u8 *dst) { if (unlikely(desc->buflen)) { desc->buf[desc->buflen++] = 1; memset(desc->buf + desc->buflen, 0, POLY1305_BLOCK_SIZE - desc->buflen); - poly1305_core_blocks(&desc->h, &desc->core_r, desc->buf, 1, 0); + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_blocks_arch(&desc->state, desc->buf, + POLY1305_BLOCK_SIZE, 0); + else + poly1305_blocks_generic(&desc->state, desc->buf, + POLY1305_BLOCK_SIZE, 0); } - poly1305_core_emit(&desc->h, desc->s, dst); + if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305)) + poly1305_emit_arch(&desc->state.h, dst, desc->s); + else + poly1305_emit_generic(&desc->state.h, dst, desc->s); *desc = (struct poly1305_desc_ctx){}; } -EXPORT_SYMBOL_GPL(poly1305_final_generic); +EXPORT_SYMBOL(poly1305_final); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Martin Willi <martin@strongswan.org>"); diff --git a/lib/crypto/sha256-generic.c b/lib/crypto/sha256-generic.c new file mode 100644 index 000000000000..a16ad4f25ebb --- /dev/null +++ b/lib/crypto/sha256-generic.c @@ -0,0 +1,137 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * SHA-256, as specified in + * 
http://csrc.nist.gov/groups/STM/cavp/documents/shs/sha256-384-512.pdf + * + * SHA-256 code by Jean-Luc Cooke <jlcooke@certainkey.com>. + * + * Copyright (c) Jean-Luc Cooke <jlcooke@certainkey.com> + * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> + * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> + * Copyright (c) 2014 Red Hat Inc. + */ + +#include <crypto/internal/sha2.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> + +static const u32 SHA256_K[] = { + 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, + 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, + 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, + 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, + 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, + 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, + 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, + 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, + 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, + 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, + 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, + 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, + 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, + 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, + 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, + 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, +}; + +static inline u32 Ch(u32 x, u32 y, u32 z) +{ + return z ^ (x & (y ^ z)); +} + +static inline u32 Maj(u32 x, u32 y, u32 z) +{ + return (x & y) | (z & (x | y)); +} + +#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22)) +#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25)) +#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3)) +#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10)) + +static inline void LOAD_OP(int I, u32 *W, const u8 *input) +{ + W[I] = get_unaligned_be32((__u32 *)input + I); +} + +static inline void BLEND_OP(int I, u32 *W) +{ + W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; +} + +#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \ + u32 t1, t2; \ + t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \ + t2 = e0(a) + Maj(a, b, c); \ + d += t1; \ + h = t1 + t2; \ +} while (0) + +static void sha256_block_generic(u32 state[SHA256_STATE_WORDS], + const u8 *input, u32 W[64]) +{ + u32 a, b, c, d, e, f, g, h; + int i; + + /* load the input */ + for (i = 0; i < 16; i += 8) { + LOAD_OP(i + 0, W, input); + LOAD_OP(i + 1, W, input); + LOAD_OP(i + 2, W, input); + LOAD_OP(i + 3, W, input); + LOAD_OP(i + 4, W, input); + LOAD_OP(i + 5, W, input); + LOAD_OP(i + 6, W, input); + LOAD_OP(i + 7, W, input); + } + + /* now blend */ + for (i = 16; i < 64; i += 8) { + BLEND_OP(i + 0, W); + BLEND_OP(i + 1, W); + BLEND_OP(i + 2, W); + BLEND_OP(i + 3, W); + BLEND_OP(i + 4, W); + BLEND_OP(i + 5, W); + BLEND_OP(i + 6, W); + BLEND_OP(i + 7, W); + } + + /* load the state into our registers */ + a = state[0]; b = state[1]; c = state[2]; d = state[3]; + e = state[4]; f = state[5]; g = state[6]; h = state[7]; + + /* now iterate */ + for (i = 0; i < 64; i += 8) { + SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h); + SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g); + SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f); + SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e); + SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d); + SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c); + SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b); + SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a); + } + + state[0] += a; state[1] += b; state[2] += c; state[3] += d; + state[4] += e; state[5] += f; state[6] += g; state[7] += h; +} + 
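As a rough usage sketch for the exported block function defined next: the caller seeds the state with the standard FIPS 180-4 initial hash values and feeds whole 64-byte blocks; padding and length encoding are not handled at this level (the sha256.c diff below routes those through sha256_finup()). Illustrative only, not part of the patch:

	u32 state[SHA256_STATE_WORDS] = {
		0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a,
		0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19,
	};
	u8 buf[2 * SHA256_BLOCK_SIZE];	/* exactly two full input blocks */

	sha256_blocks_generic(state, buf, 2);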
+void sha256_blocks_generic(u32 state[SHA256_STATE_WORDS], + const u8 *data, size_t nblocks) +{ + u32 W[64]; + + do { + sha256_block_generic(state, data, W); + data += SHA256_BLOCK_SIZE; + } while (--nblocks); + + memzero_explicit(W, sizeof(W)); +} +EXPORT_SYMBOL_GPL(sha256_blocks_generic); + +MODULE_DESCRIPTION("SHA-256 Algorithm (generic implementation)"); +MODULE_LICENSE("GPL"); diff --git a/lib/crypto/sha256.c b/lib/crypto/sha256.c index 04c1f2557e6c..107e5162507a 100644 --- a/lib/crypto/sha256.c +++ b/lib/crypto/sha256.c @@ -11,151 +11,65 @@ * Copyright (c) 2014 Red Hat Inc. */ -#include <linux/unaligned.h> -#include <crypto/sha256_base.h> +#include <crypto/internal/blockhash.h> +#include <crypto/internal/sha2.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> -static const u32 SHA256_K[] = { - 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, - 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, - 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, - 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, - 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, - 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, - 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, - 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, - 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, - 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, - 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, - 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, - 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, - 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, - 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, - 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, -}; +/* + * If __DISABLE_EXPORTS is defined, then this file is being compiled for a + * pre-boot environment. In that case, ignore the kconfig options, pull the + * generic code into the same translation unit, and use that only. 
+ */ +#ifdef __DISABLE_EXPORTS +#include "sha256-generic.c" +#endif -static inline u32 Ch(u32 x, u32 y, u32 z) +static inline bool sha256_purgatory(void) { - return z ^ (x & (y ^ z)); + return __is_defined(__DISABLE_EXPORTS); } -static inline u32 Maj(u32 x, u32 y, u32 z) +static inline void sha256_blocks(u32 state[SHA256_STATE_WORDS], const u8 *data, + size_t nblocks) { - return (x & y) | (z & (x | y)); + sha256_choose_blocks(state, data, nblocks, sha256_purgatory(), false); } -#define e0(x) (ror32(x, 2) ^ ror32(x, 13) ^ ror32(x, 22)) -#define e1(x) (ror32(x, 6) ^ ror32(x, 11) ^ ror32(x, 25)) -#define s0(x) (ror32(x, 7) ^ ror32(x, 18) ^ (x >> 3)) -#define s1(x) (ror32(x, 17) ^ ror32(x, 19) ^ (x >> 10)) - -static inline void LOAD_OP(int I, u32 *W, const u8 *input) +void sha256_update(struct sha256_state *sctx, const u8 *data, size_t len) { - W[I] = get_unaligned_be32((__u32 *)input + I); -} + size_t partial = sctx->count % SHA256_BLOCK_SIZE; -static inline void BLEND_OP(int I, u32 *W) -{ - W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16]; -} - -#define SHA256_ROUND(i, a, b, c, d, e, f, g, h) do { \ - u32 t1, t2; \ - t1 = h + e1(e) + Ch(e, f, g) + SHA256_K[i] + W[i]; \ - t2 = e0(a) + Maj(a, b, c); \ - d += t1; \ - h = t1 + t2; \ -} while (0) - -static void sha256_transform(u32 *state, const u8 *input, u32 *W) -{ - u32 a, b, c, d, e, f, g, h; - int i; - - /* load the input */ - for (i = 0; i < 16; i += 8) { - LOAD_OP(i + 0, W, input); - LOAD_OP(i + 1, W, input); - LOAD_OP(i + 2, W, input); - LOAD_OP(i + 3, W, input); - LOAD_OP(i + 4, W, input); - LOAD_OP(i + 5, W, input); - LOAD_OP(i + 6, W, input); - LOAD_OP(i + 7, W, input); - } - - /* now blend */ - for (i = 16; i < 64; i += 8) { - BLEND_OP(i + 0, W); - BLEND_OP(i + 1, W); - BLEND_OP(i + 2, W); - BLEND_OP(i + 3, W); - BLEND_OP(i + 4, W); - BLEND_OP(i + 5, W); - BLEND_OP(i + 6, W); - BLEND_OP(i + 7, W); - } - - /* load the state into our registers */ - a = state[0]; b = state[1]; c = state[2]; d = state[3]; - e = state[4]; f = state[5]; g = state[6]; h = state[7]; - - /* now iterate */ - for (i = 0; i < 64; i += 8) { - SHA256_ROUND(i + 0, a, b, c, d, e, f, g, h); - SHA256_ROUND(i + 1, h, a, b, c, d, e, f, g); - SHA256_ROUND(i + 2, g, h, a, b, c, d, e, f); - SHA256_ROUND(i + 3, f, g, h, a, b, c, d, e); - SHA256_ROUND(i + 4, e, f, g, h, a, b, c, d); - SHA256_ROUND(i + 5, d, e, f, g, h, a, b, c); - SHA256_ROUND(i + 6, c, d, e, f, g, h, a, b); - SHA256_ROUND(i + 7, b, c, d, e, f, g, h, a); - } - - state[0] += a; state[1] += b; state[2] += c; state[3] += d; - state[4] += e; state[5] += f; state[6] += g; state[7] += h; -} - -static void sha256_transform_blocks(struct sha256_state *sctx, - const u8 *input, int blocks) -{ - u32 W[64]; - - do { - sha256_transform(sctx->state, input, W); - input += SHA256_BLOCK_SIZE; - } while (--blocks); - - memzero_explicit(W, sizeof(W)); -} - -void sha256_update(struct sha256_state *sctx, const u8 *data, unsigned int len) -{ - lib_sha256_base_do_update(sctx, data, len, sha256_transform_blocks); + sctx->count += len; + BLOCK_HASH_UPDATE_BLOCKS(sha256_blocks, sctx->ctx.state, data, len, + SHA256_BLOCK_SIZE, sctx->buf, partial); } EXPORT_SYMBOL(sha256_update); -static void __sha256_final(struct sha256_state *sctx, u8 *out, int digest_size) +static inline void __sha256_final(struct sha256_state *sctx, u8 *out, + size_t digest_size) { - lib_sha256_base_do_finalize(sctx, sha256_transform_blocks); - lib_sha256_base_finish(sctx, out, digest_size); + size_t partial = sctx->count % SHA256_BLOCK_SIZE; + + 
sha256_finup(&sctx->ctx, sctx->buf, partial, out, digest_size, + sha256_purgatory(), false); + memzero_explicit(sctx, sizeof(*sctx)); } -void sha256_final(struct sha256_state *sctx, u8 *out) +void sha256_final(struct sha256_state *sctx, u8 out[SHA256_DIGEST_SIZE]) { - __sha256_final(sctx, out, 32); + __sha256_final(sctx, out, SHA256_DIGEST_SIZE); } EXPORT_SYMBOL(sha256_final); -void sha224_final(struct sha256_state *sctx, u8 *out) +void sha224_final(struct sha256_state *sctx, u8 out[SHA224_DIGEST_SIZE]) { - __sha256_final(sctx, out, 28); + __sha256_final(sctx, out, SHA224_DIGEST_SIZE); } EXPORT_SYMBOL(sha224_final); -void sha256(const u8 *data, unsigned int len, u8 *out) +void sha256(const u8 *data, size_t len, u8 out[SHA256_DIGEST_SIZE]) { struct sha256_state sctx; diff --git a/lib/crypto/sm3.c b/lib/crypto/sm3.c new file mode 100644 index 000000000000..efff0e267d84 --- /dev/null +++ b/lib/crypto/sm3.c @@ -0,0 +1,185 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * SM3 secure hash, as specified by OSCCA GM/T 0004-2012 SM3 and described + * at https://datatracker.ietf.org/doc/html/draft-sca-cfrg-sm3-02 + * + * Copyright (C) 2017 ARM Limited or its affiliates. + * Copyright (C) 2017 Gilad Ben-Yossef <gilad@benyossef.com> + * Copyright (C) 2021 Tianjia Zhang <tianjia.zhang@linux.alibaba.com> + */ + +#include <crypto/sm3.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/string.h> +#include <linux/unaligned.h> + +static const u32 ____cacheline_aligned K[64] = { + 0x79cc4519, 0xf3988a32, 0xe7311465, 0xce6228cb, + 0x9cc45197, 0x3988a32f, 0x7311465e, 0xe6228cbc, + 0xcc451979, 0x988a32f3, 0x311465e7, 0x6228cbce, + 0xc451979c, 0x88a32f39, 0x11465e73, 0x228cbce6, + 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, + 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, + 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, + 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5, + 0x7a879d8a, 0xf50f3b14, 0xea1e7629, 0xd43cec53, + 0xa879d8a7, 0x50f3b14f, 0xa1e7629e, 0x43cec53d, + 0x879d8a7a, 0x0f3b14f5, 0x1e7629ea, 0x3cec53d4, + 0x79d8a7a8, 0xf3b14f50, 0xe7629ea1, 0xcec53d43, + 0x9d8a7a87, 0x3b14f50f, 0x7629ea1e, 0xec53d43c, + 0xd8a7a879, 0xb14f50f3, 0x629ea1e7, 0xc53d43ce, + 0x8a7a879d, 0x14f50f3b, 0x29ea1e76, 0x53d43cec, + 0xa7a879d8, 0x4f50f3b1, 0x9ea1e762, 0x3d43cec5 +}; + +/* + * Transform the message X which consists of 16 32-bit-words. See + * GM/T 004-2012 for details. 
+ */ +#define R(i, a, b, c, d, e, f, g, h, t, w1, w2) \ + do { \ + ss1 = rol32((rol32((a), 12) + (e) + (t)), 7); \ + ss2 = ss1 ^ rol32((a), 12); \ + d += FF ## i(a, b, c) + ss2 + ((w1) ^ (w2)); \ + h += GG ## i(e, f, g) + ss1 + (w1); \ + b = rol32((b), 9); \ + f = rol32((f), 19); \ + h = P0((h)); \ + } while (0) + +#define R1(a, b, c, d, e, f, g, h, t, w1, w2) \ + R(1, a, b, c, d, e, f, g, h, t, w1, w2) +#define R2(a, b, c, d, e, f, g, h, t, w1, w2) \ + R(2, a, b, c, d, e, f, g, h, t, w1, w2) + +#define FF1(x, y, z) (x ^ y ^ z) +#define FF2(x, y, z) ((x & y) | (x & z) | (y & z)) + +#define GG1(x, y, z) FF1(x, y, z) +#define GG2(x, y, z) ((x & y) | (~x & z)) + +/* Message expansion */ +#define P0(x) ((x) ^ rol32((x), 9) ^ rol32((x), 17)) +#define P1(x) ((x) ^ rol32((x), 15) ^ rol32((x), 23)) +#define I(i) (W[i] = get_unaligned_be32(data + i * 4)) +#define W1(i) (W[i & 0x0f]) +#define W2(i) (W[i & 0x0f] = \ + P1(W[i & 0x0f] \ + ^ W[(i-9) & 0x0f] \ + ^ rol32(W[(i-3) & 0x0f], 15)) \ + ^ rol32(W[(i-13) & 0x0f], 7) \ + ^ W[(i-6) & 0x0f]) + +static void sm3_transform(struct sm3_state *sctx, u8 const *data, u32 W[16]) +{ + u32 a, b, c, d, e, f, g, h, ss1, ss2; + + a = sctx->state[0]; + b = sctx->state[1]; + c = sctx->state[2]; + d = sctx->state[3]; + e = sctx->state[4]; + f = sctx->state[5]; + g = sctx->state[6]; + h = sctx->state[7]; + + R1(a, b, c, d, e, f, g, h, K[0], I(0), I(4)); + R1(d, a, b, c, h, e, f, g, K[1], I(1), I(5)); + R1(c, d, a, b, g, h, e, f, K[2], I(2), I(6)); + R1(b, c, d, a, f, g, h, e, K[3], I(3), I(7)); + R1(a, b, c, d, e, f, g, h, K[4], W1(4), I(8)); + R1(d, a, b, c, h, e, f, g, K[5], W1(5), I(9)); + R1(c, d, a, b, g, h, e, f, K[6], W1(6), I(10)); + R1(b, c, d, a, f, g, h, e, K[7], W1(7), I(11)); + R1(a, b, c, d, e, f, g, h, K[8], W1(8), I(12)); + R1(d, a, b, c, h, e, f, g, K[9], W1(9), I(13)); + R1(c, d, a, b, g, h, e, f, K[10], W1(10), I(14)); + R1(b, c, d, a, f, g, h, e, K[11], W1(11), I(15)); + R1(a, b, c, d, e, f, g, h, K[12], W1(12), W2(16)); + R1(d, a, b, c, h, e, f, g, K[13], W1(13), W2(17)); + R1(c, d, a, b, g, h, e, f, K[14], W1(14), W2(18)); + R1(b, c, d, a, f, g, h, e, K[15], W1(15), W2(19)); + + R2(a, b, c, d, e, f, g, h, K[16], W1(16), W2(20)); + R2(d, a, b, c, h, e, f, g, K[17], W1(17), W2(21)); + R2(c, d, a, b, g, h, e, f, K[18], W1(18), W2(22)); + R2(b, c, d, a, f, g, h, e, K[19], W1(19), W2(23)); + R2(a, b, c, d, e, f, g, h, K[20], W1(20), W2(24)); + R2(d, a, b, c, h, e, f, g, K[21], W1(21), W2(25)); + R2(c, d, a, b, g, h, e, f, K[22], W1(22), W2(26)); + R2(b, c, d, a, f, g, h, e, K[23], W1(23), W2(27)); + R2(a, b, c, d, e, f, g, h, K[24], W1(24), W2(28)); + R2(d, a, b, c, h, e, f, g, K[25], W1(25), W2(29)); + R2(c, d, a, b, g, h, e, f, K[26], W1(26), W2(30)); + R2(b, c, d, a, f, g, h, e, K[27], W1(27), W2(31)); + R2(a, b, c, d, e, f, g, h, K[28], W1(28), W2(32)); + R2(d, a, b, c, h, e, f, g, K[29], W1(29), W2(33)); + R2(c, d, a, b, g, h, e, f, K[30], W1(30), W2(34)); + R2(b, c, d, a, f, g, h, e, K[31], W1(31), W2(35)); + + R2(a, b, c, d, e, f, g, h, K[32], W1(32), W2(36)); + R2(d, a, b, c, h, e, f, g, K[33], W1(33), W2(37)); + R2(c, d, a, b, g, h, e, f, K[34], W1(34), W2(38)); + R2(b, c, d, a, f, g, h, e, K[35], W1(35), W2(39)); + R2(a, b, c, d, e, f, g, h, K[36], W1(36), W2(40)); + R2(d, a, b, c, h, e, f, g, K[37], W1(37), W2(41)); + R2(c, d, a, b, g, h, e, f, K[38], W1(38), W2(42)); + R2(b, c, d, a, f, g, h, e, K[39], W1(39), W2(43)); + R2(a, b, c, d, e, f, g, h, K[40], W1(40), W2(44)); + R2(d, a, b, c, h, e, f, g, K[41], W1(41), W2(45)); + R2(c, d, a, 
b, g, h, e, f, K[42], W1(42), W2(46)); + R2(b, c, d, a, f, g, h, e, K[43], W1(43), W2(47)); + R2(a, b, c, d, e, f, g, h, K[44], W1(44), W2(48)); + R2(d, a, b, c, h, e, f, g, K[45], W1(45), W2(49)); + R2(c, d, a, b, g, h, e, f, K[46], W1(46), W2(50)); + R2(b, c, d, a, f, g, h, e, K[47], W1(47), W2(51)); + + R2(a, b, c, d, e, f, g, h, K[48], W1(48), W2(52)); + R2(d, a, b, c, h, e, f, g, K[49], W1(49), W2(53)); + R2(c, d, a, b, g, h, e, f, K[50], W1(50), W2(54)); + R2(b, c, d, a, f, g, h, e, K[51], W1(51), W2(55)); + R2(a, b, c, d, e, f, g, h, K[52], W1(52), W2(56)); + R2(d, a, b, c, h, e, f, g, K[53], W1(53), W2(57)); + R2(c, d, a, b, g, h, e, f, K[54], W1(54), W2(58)); + R2(b, c, d, a, f, g, h, e, K[55], W1(55), W2(59)); + R2(a, b, c, d, e, f, g, h, K[56], W1(56), W2(60)); + R2(d, a, b, c, h, e, f, g, K[57], W1(57), W2(61)); + R2(c, d, a, b, g, h, e, f, K[58], W1(58), W2(62)); + R2(b, c, d, a, f, g, h, e, K[59], W1(59), W2(63)); + R2(a, b, c, d, e, f, g, h, K[60], W1(60), W2(64)); + R2(d, a, b, c, h, e, f, g, K[61], W1(61), W2(65)); + R2(c, d, a, b, g, h, e, f, K[62], W1(62), W2(66)); + R2(b, c, d, a, f, g, h, e, K[63], W1(63), W2(67)); + + sctx->state[0] ^= a; + sctx->state[1] ^= b; + sctx->state[2] ^= c; + sctx->state[3] ^= d; + sctx->state[4] ^= e; + sctx->state[5] ^= f; + sctx->state[6] ^= g; + sctx->state[7] ^= h; +} +#undef R +#undef R1 +#undef R2 +#undef I +#undef W1 +#undef W2 + +void sm3_block_generic(struct sm3_state *sctx, u8 const *data, int blocks) +{ + u32 W[16]; + + do { + sm3_transform(sctx, data, W); + data += SM3_BLOCK_SIZE; + } while (--blocks); + + memzero_explicit(W, sizeof(W)); +} +EXPORT_SYMBOL_GPL(sm3_block_generic); + +MODULE_DESCRIPTION("Generic SM3 library"); +MODULE_LICENSE("GPL v2"); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 8c7fdb7d8c8f..bc9391e55d57 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -1191,7 +1191,7 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, return -ENOMEM; p = *pages; for (int k = 0; k < n; k++) { - struct folio *folio = page_folio(page); + struct folio *folio = page_folio(page + k); p[k] = page + k; if (!folio_test_slab(folio)) folio_get(folio); diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c index 3f39955cb0f1..0061d4c7e351 100644 --- a/lib/kunit/executor.c +++ b/lib/kunit/executor.c @@ -177,7 +177,7 @@ kunit_filter_suites(const struct kunit_suite_set *suite_set, const size_t max = suite_set->end - suite_set->start; - copy = kcalloc(max, sizeof(*filtered.start), GFP_KERNEL); + copy = kcalloc(max, sizeof(*copy), GFP_KERNEL); if (!copy) { /* won't be able to run anything, return an empty set */ return filtered; } diff --git a/lib/kunit/static_stub.c b/lib/kunit/static_stub.c index 92b2cccd5e76..484fd85251b4 100644 --- a/lib/kunit/static_stub.c +++ b/lib/kunit/static_stub.c @@ -96,7 +96,7 @@ void __kunit_activate_static_stub(struct kunit *test, /* If the replacement address is NULL, deactivate the stub. 
*/ if (!replacement_addr) { - kunit_deactivate_static_stub(test, replacement_addr); + kunit_deactivate_static_stub(test, real_fn_addr); return; } diff --git a/lib/sg_split.c b/lib/sg_split.c index 60a0babebf2e..0f89aab5c671 100644 --- a/lib/sg_split.c +++ b/lib/sg_split.c @@ -88,8 +88,6 @@ static void sg_split_phys(struct sg_splitter *splitters, const int nb_splits) if (!j) { out_sg->offset += split->skip_sg0; out_sg->length -= split->skip_sg0; - } else { - out_sg->offset = 0; } sg_dma_address(out_sg) = 0; sg_dma_len(out_sg) = 0; diff --git a/lib/sort.c b/lib/sort.c index 8e73dc55476b..52363995ccc5 100644 --- a/lib/sort.c +++ b/lib/sort.c @@ -186,36 +186,13 @@ static size_t parent(size_t i, unsigned int lsbit, size_t size) return i / 2; } -/** - * sort_r - sort an array of elements - * @base: pointer to data to sort - * @num: number of elements - * @size: size of each element - * @cmp_func: pointer to comparison function - * @swap_func: pointer to swap function or NULL - * @priv: third argument passed to comparison function - * - * This function does a heapsort on the given array. You may provide - * a swap_func function if you need to do something more than a memory - * copy (e.g. fix up pointers or auxiliary data), but the built-in swap - * avoids a slow retpoline and so is significantly faster. - * - * The comparison function must adhere to specific mathematical - * properties to ensure correct and stable sorting: - * - Antisymmetry: cmp_func(a, b) must return the opposite sign of - * cmp_func(b, a). - * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then - * cmp_func(a, c) <= 0. - * - * Sorting time is O(n log n) both on average and worst-case. While - * quicksort is slightly faster on average, it suffers from exploitable - * O(n*n) worst-case behavior and extra memory requirements that make - * it less suitable for kernel use. - */ -void sort_r(void *base, size_t num, size_t size, - cmp_r_func_t cmp_func, - swap_r_func_t swap_func, - const void *priv) +#include <linux/sched.h> + +static void __sort_r(void *base, size_t num, size_t size, + cmp_r_func_t cmp_func, + swap_r_func_t swap_func, + const void *priv, + bool may_schedule) { /* pre-scale counters for performance */ size_t n = num * size, a = (num/2) * size; @@ -286,6 +263,9 @@ void sort_r(void *base, size_t num, size_t size, b = parent(b, lsbit, size); do_swap(base + b, base + c, size, swap_func, priv); } + + if (may_schedule) + cond_resched(); } n -= size; @@ -293,8 +273,63 @@ void sort_r(void *base, size_t num, size_t size, if (n == size * 2 && do_cmp(base, base + size, cmp_func, priv) > 0) do_swap(base, base + size, size, swap_func, priv); } + +/** + * sort_r - sort an array of elements + * @base: pointer to data to sort + * @num: number of elements + * @size: size of each element + * @cmp_func: pointer to comparison function + * @swap_func: pointer to swap function or NULL + * @priv: third argument passed to comparison function + * + * This function does a heapsort on the given array. You may provide + * a swap_func function if you need to do something more than a memory + * copy (e.g. fix up pointers or auxiliary data), but the built-in swap + * avoids a slow retpoline and so is significantly faster. + * + * The comparison function must adhere to specific mathematical + * properties to ensure correct and stable sorting: + * - Antisymmetry: cmp_func(a, b) must return the opposite sign of + * cmp_func(b, a). + * - Transitivity: if cmp_func(a, b) <= 0 and cmp_func(b, c) <= 0, then + * cmp_func(a, c) <= 0. 
+ * + * Sorting time is O(n log n) both on average and worst-case. While + * quicksort is slightly faster on average, it suffers from exploitable + * O(n*n) worst-case behavior and extra memory requirements that make + * it less suitable for kernel use. + */ +void sort_r(void *base, size_t num, size_t size, + cmp_r_func_t cmp_func, + swap_r_func_t swap_func, + const void *priv) +{ + __sort_r(base, num, size, cmp_func, swap_func, priv, false); +} EXPORT_SYMBOL(sort_r); +/** + * sort_r_nonatomic - sort an array of elements, with cond_resched + * @base: pointer to data to sort + * @num: number of elements + * @size: size of each element + * @cmp_func: pointer to comparison function + * @swap_func: pointer to swap function or NULL + * @priv: third argument passed to comparison function + * + * Same as sort_r, but preferred for larger arrays as it does a periodic + * cond_resched(). + */ +void sort_r_nonatomic(void *base, size_t num, size_t size, + cmp_r_func_t cmp_func, + swap_r_func_t swap_func, + const void *priv) +{ + __sort_r(base, num, size, cmp_func, swap_func, priv, true); +} +EXPORT_SYMBOL(sort_r_nonatomic); + void sort(void *base, size_t num, size_t size, cmp_func_t cmp_func, swap_func_t swap_func) @@ -304,6 +339,19 @@ void sort(void *base, size_t num, size_t size, .swap = swap_func, }; - return sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w); + return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, false); } EXPORT_SYMBOL(sort); + +void sort_nonatomic(void *base, size_t num, size_t size, + cmp_func_t cmp_func, + swap_func_t swap_func) +{ + struct wrapper w = { + .cmp = cmp_func, + .swap = swap_func, + }; + + return __sort_r(base, num, size, _CMP_WRAPPER, SWAP_WRAPPER, &w, true); +} +EXPORT_SYMBOL(sort_nonatomic); diff --git a/lib/string.c b/lib/string.c index eb4486ed40d2..b632c71df1a5 100644 --- a/lib/string.c +++ b/lib/string.c @@ -119,6 +119,7 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count) if (count == 0 || WARN_ON_ONCE(count > INT_MAX)) return -E2BIG; +#ifndef CONFIG_DCACHE_WORD_ACCESS #ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS /* * If src is unaligned, don't cross a page boundary, @@ -134,11 +135,13 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count) if (((long) dest | (long) src) & (sizeof(long) - 1)) max = 0; #endif +#endif /* - * read_word_at_a_time() below may read uninitialized bytes after the - * trailing zero and use them in comparisons. Disable this optimization - * under KMSAN to prevent false positive reports. + * load_unaligned_zeropad() or read_word_at_a_time() below may read + * uninitialized bytes after the trailing zero and use them in + * comparisons. Disable this optimization under KMSAN to prevent + * false positive reports. */ if (IS_ENABLED(CONFIG_KMSAN)) max = 0; @@ -146,7 +149,11 @@ ssize_t sized_strscpy(char *dest, const char *src, size_t count) while (max >= sizeof(unsigned long)) { unsigned long c, data; +#ifdef CONFIG_DCACHE_WORD_ACCESS + c = load_unaligned_zeropad(src+res); +#else c = read_word_at_a_time(src+res); +#endif if (has_zero(c, &data, &constants)) { data = prep_zero_mask(c, data, &constants); data = create_zero_mask(data); diff --git a/lib/test_ubsan.c b/lib/test_ubsan.c index 8772e5edaa4f..a4b6f52b9c57 100644 --- a/lib/test_ubsan.c +++ b/lib/test_ubsan.c @@ -77,18 +77,22 @@ static void test_ubsan_shift_out_of_bounds(void) static void test_ubsan_out_of_bounds(void) { - volatile int i = 4, j = 5, k = -1; - volatile char above[4] = { }; /* Protect surrounding memory. 
*/ - volatile int arr[4]; - volatile char below[4] = { }; /* Protect surrounding memory. */ + int i = 4, j = 4, k = -1; + volatile struct { + char above[4]; /* Protect surrounding memory. */ + int arr[4]; + char below[4]; /* Protect surrounding memory. */ + } data; - above[0] = below[0]; + OPTIMIZER_HIDE_VAR(i); + OPTIMIZER_HIDE_VAR(j); + OPTIMIZER_HIDE_VAR(k); UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "above"); - arr[j] = i; + data.arr[j] = i; UBSAN_TEST(CONFIG_UBSAN_BOUNDS, "below"); - arr[k] = i; + data.arr[k] = i; } enum ubsan_test_enum { diff --git a/lib/tests/slub_kunit.c b/lib/tests/slub_kunit.c index d47c472b0520..848b682a2d70 100644 --- a/lib/tests/slub_kunit.c +++ b/lib/tests/slub_kunit.c @@ -325,4 +325,5 @@ static struct kunit_suite test_suite = { }; kunit_test_suite(test_suite); +MODULE_DESCRIPTION("Kunit tests for slub allocator"); MODULE_LICENSE("GPL"); diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c index 9308bcfb2ad5..dfb4f2358cab 100644 --- a/lib/ucs2_string.c +++ b/lib/ucs2_string.c @@ -165,4 +165,5 @@ ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength) } EXPORT_SYMBOL(ucs2_as_utf8); +MODULE_DESCRIPTION("UCS2 string handling"); MODULE_LICENSE("GPL v2"); diff --git a/lib/vdso/datastore.c b/lib/vdso/datastore.c index c715e217ec65..3693c6caf2c4 100644 --- a/lib/vdso/datastore.c +++ b/lib/vdso/datastore.c @@ -99,7 +99,8 @@ const struct vm_special_mapping vdso_vvar_mapping = { struct vm_area_struct *vdso_install_vvar_mapping(struct mm_struct *mm, unsigned long addr) { return _install_special_mapping(mm, addr, VDSO_NR_PAGES * PAGE_SIZE, - VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | VM_PFNMAP, + VM_READ | VM_MAYREAD | VM_IO | VM_DONTDUMP | + VM_PFNMAP | VM_SEALED_SYSMAP, &vdso_vvar_mapping); } diff --git a/lib/zlib_inflate/inflate_syms.c b/lib/zlib_inflate/inflate_syms.c index 9720114c0672..b8996d90e8bc 100644 --- a/lib/zlib_inflate/inflate_syms.c +++ b/lib/zlib_inflate/inflate_syms.c @@ -18,4 +18,5 @@ EXPORT_SYMBOL(zlib_inflateEnd); EXPORT_SYMBOL(zlib_inflateReset); EXPORT_SYMBOL(zlib_inflateIncomp); EXPORT_SYMBOL(zlib_inflate_blob); +MODULE_DESCRIPTION("Data decompression using the deflation algorithm"); MODULE_LICENSE("GPL");
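To round out the lib/sort.c change above, a hedged usage sketch of the new sort_nonatomic() entry point: it has the same calling convention as sort(), but may call cond_resched() between sift-down passes, so it is only safe in process context (helper names below are illustrative):

	static int cmp_u32(const void *a, const void *b)
	{
		u32 x = *(const u32 *)a, y = *(const u32 *)b;

		/* Antisymmetric and transitive, as the sort_r() kernel-doc
		 * requires; avoids the overflow a plain subtraction risks.
		 */
		return x < y ? -1 : x > y;
	}

	static void sort_big_array(u32 *vals, size_t n)
	{
		/* NULL swap_func selects the fast built-in memory swap. */
		sort_nonatomic(vals, n, sizeof(*vals), cmp_u32, NULL);
	}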