Diffstat (limited to 'lib')

 lib/Kconfig                                 |   7
 lib/Kconfig.debug                           |  12
 lib/Kconfig.ubsan                           |   3
 lib/Makefile                                |   6
 lib/base64.c                                | 103
 lib/crypto/Kconfig                          |   3
 lib/crypto/Makefile                         |   3
 lib/crypto/blake2s-selftest.c               |  41
 lib/crypto/blake2s.c                        |  37
 lib/crypto/sha1.c (renamed from lib/sha1.c) |   3
 lib/idr.c                                   |   3
 lib/iov_iter.c                              | 390
 lib/kunit/executor.c                        | 125
 lib/kunit/executor_test.c                   | 144
 lib/kunit/test.c                            |  58
 lib/livepatch/test_klp_callbacks_busy.c     |   8
 lib/lockref.c                               |  25
 lib/mpi/mpi-add.c                           |   2
 lib/mpi/mpi-mul.c                           |   1
 lib/overflow_kunit.c                        |   6
 lib/sbitmap.c                               |   5
 lib/test_bpf.c                              |   4
 lib/test_free_pages.c                       |   2
 lib/test_hmm.c                              | 347
 lib/test_hmm_uapi.h                         |  19
 lib/test_kasan.c                            |  10
 lib/test_printf.c                           |  21
 lib/test_vmalloc.c                          |  15
 lib/trace_readwrite.c                       |  47
 29 files changed, 872 insertions(+), 578 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 986ea474836c..dc1ab2ed1dc6 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -118,6 +118,13 @@ config INDIRECT_IOMEM_FALLBACK
mmio accesses when the IO memory address is not a registered
emulated region.
+config TRACE_MMIO_ACCESS
+ bool "Register read/write tracing"
+ depends on TRACING && ARCH_HAVE_TRACE_MMIO_ACCESS
+ help
+ Create tracepoints for MMIO read/write operations. These trace events
+ can be used for logging all MMIO read/write operations.
+
source "lib/crypto/Kconfig"
config LIB_MEMNEQ
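
The tracepoint implementation itself lands in lib/trace_readwrite.c, which this diff only names in the diffstat. As a rough, hypothetical sketch of the kind of instrumentation the option enables (the demo_* wrappers and the trace_printk() logging are illustrative stand-ins, not the real trace events):

#include <linux/io.h>
#include <linux/kernel.h>

/* Hypothetical wrappers; the real option hooks the generic MMIO
 * accessors with proper trace events rather than trace_printk(). */
static inline u32 demo_traced_readl(const void __iomem *addr)
{
	u32 val = readl(addr);

	trace_printk("mmio read  %p -> %#x\n", addr, val);
	return val;
}

static inline void demo_traced_writel(u32 val, void __iomem *addr)
{
	trace_printk("mmio write %p <- %#x\n", addr, val);
	writel(val, addr);
}
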
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 04aaa20d50f9..072e4b289c13 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -498,7 +498,7 @@ config STACK_VALIDATION
runtime stack traces are more reliable.
For more information, see
- tools/objtool/Documentation/stack-validation.txt.
+ tools/objtool/Documentation/objtool.txt.
config NOINSTR_VALIDATION
bool
@@ -699,6 +699,14 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
help
Debug objects boot parameter default value
+config SHRINKER_DEBUG
+ bool "Enable shrinker debugging support"
+ depends on DEBUG_FS
+ help
+ Say Y to enable the shrinker debugfs interface which provides
+ visibility into the kernel memory shrinkers subsystem.
+ Disable it to avoid an extra memory footprint.
+
config HAVE_DEBUG_KMEMLEAK
bool
@@ -1560,7 +1568,7 @@ config DEBUG_KOBJECT_RELEASE
help
kobjects are reference counted objects. This means that their
last reference count put is not predictable, and the kobject can
- live on past the point at which a driver decides to drop it's
+ live on past the point at which a driver decides to drop its
initial reference to the kobject gained on allocation. An
example of this would be a struct device which has just been
unregistered.
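
For context on what the new debugfs interface exposes: shrinkers registered with a name show up under /sys/kernel/debug/shrinker/. A minimal, inert registration sketch, assuming the register_shrinker() signature that gained a printf-style name argument in this same merge window (the demo_* names are illustrative):

#include <linux/init.h>
#include <linux/shrinker.h>

static unsigned long demo_count_objects(struct shrinker *sh,
					struct shrink_control *sc)
{
	return 0;	/* nothing freeable in this sketch */
}

static unsigned long demo_scan_objects(struct shrinker *sh,
				       struct shrink_control *sc)
{
	return SHRINK_STOP;
}

static struct shrinker demo_shrinker = {
	.count_objects	= demo_count_objects,
	.scan_objects	= demo_scan_objects,
	.seeks		= DEFAULT_SEEKS,
};

static int __init demo_shrinker_init(void)
{
	/* With CONFIG_SHRINKER_DEBUG, named shrinkers appear under
	 * /sys/kernel/debug/shrinker/ (exact entry naming is an
	 * assumption here). */
	return register_shrinker(&demo_shrinker, "demo");
}
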
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index a9f7eb047768..fd15230a703b 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -84,6 +84,9 @@ config UBSAN_SHIFT
config UBSAN_DIV_ZERO
bool "Perform checking for integer divide-by-zero"
depends on $(cc-option,-fsanitize=integer-divide-by-zero)
+ # https://github.com/ClangBuiltLinux/linux/issues/1657
+ # https://github.com/llvm/llvm-project/issues/56289
+ depends on !CC_IS_CLANG
help
This option enables -fsanitize=integer-divide-by-zero which checks
for integer division by zero. This is effectively redundant with the
diff --git a/lib/Makefile b/lib/Makefile
index de3e47453fe8..17e48da223e2 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -29,7 +29,7 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o timerqueue.o xarray.o \
- idr.o extable.o sha1.o irq_regs.o argv_split.o \
+ idr.o extable.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
@@ -45,7 +45,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \
bust_spinlocks.o kasprintf.o bitmap.o scatterlist.o \
list_sort.o uuid.o iov_iter.o clz_ctz.o \
bsearch.o find_bit.o llist.o memweight.o kfifo.o \
- percpu-refcount.o rhashtable.o \
+ percpu-refcount.o rhashtable.o base64.o \
once.o refcount.o usercopy.o errseq.o bucket_locks.o \
generic-radix-tree.o
obj-$(CONFIG_STRING_SELFTEST) += test_string.o
@@ -151,6 +151,8 @@ lib-y += logic_pio.o
lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
+obj-$(CONFIG_TRACE_MMIO_ACCESS) += trace_readwrite.o
+
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_BTREE) += btree.o
diff --git a/lib/base64.c b/lib/base64.c
new file mode 100644
index 000000000000..b736a7a431c5
--- /dev/null
+++ b/lib/base64.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * base64.c - RFC4648-compliant base64 encoding
+ *
+ * Copyright (c) 2020 Hannes Reinecke, SUSE
+ *
+ * Based on the base64url routines from fs/crypto/fname.c
+ * (which are using the URL-safe base64 encoding),
+ * modified to use the standard coding table from RFC4648 section 4.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/base64.h>
+
+static const char base64_table[65] =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
+
+/**
+ * base64_encode() - base64-encode some binary data
+ * @src: the binary data to encode
+ * @srclen: the length of @src in bytes
+ * @dst: (output) the base64-encoded string. Not NUL-terminated.
+ *
+ * Encodes data using base64 encoding, i.e. the "Base 64 Encoding" specified
+ * by RFC 4648, including the '='-padding.
+ *
+ * Return: the length of the resulting base64-encoded string in bytes.
+ */
+int base64_encode(const u8 *src, int srclen, char *dst)
+{
+ u32 ac = 0;
+ int bits = 0;
+ int i;
+ char *cp = dst;
+
+ for (i = 0; i < srclen; i++) {
+ ac = (ac << 8) | src[i];
+ bits += 8;
+ do {
+ bits -= 6;
+ *cp++ = base64_table[(ac >> bits) & 0x3f];
+ } while (bits >= 6);
+ }
+ if (bits) {
+ *cp++ = base64_table[(ac << (6 - bits)) & 0x3f];
+ bits -= 6;
+ }
+ while (bits < 0) {
+ *cp++ = '=';
+ bits += 2;
+ }
+ return cp - dst;
+}
+EXPORT_SYMBOL_GPL(base64_encode);
+
+/**
+ * base64_decode() - base64-decode a string
+ * @src: the string to decode. Doesn't need to be NUL-terminated.
+ * @srclen: the length of @src in bytes
+ * @dst: (output) the decoded binary data
+ *
+ * Decodes a string using base64 encoding, i.e. the "Base 64 Encoding"
+ * specified by RFC 4648, including the '='-padding.
+ *
+ * This implementation hasn't been optimized for performance.
+ *
+ * Return: the length of the resulting decoded binary data in bytes,
+ * or -1 if the string isn't a valid base64 string.
+ */
+int base64_decode(const char *src, int srclen, u8 *dst)
+{
+ u32 ac = 0;
+ int bits = 0;
+ int i;
+ u8 *bp = dst;
+
+ for (i = 0; i < srclen; i++) {
+ const char *p = strchr(base64_table, src[i]);
+
+ if (src[i] == '=') {
+ ac = (ac << 6);
+ bits += 6;
+ if (bits >= 8)
+ bits -= 8;
+ continue;
+ }
+ if (p == NULL || src[i] == 0)
+ return -1;
+ ac = (ac << 6) | (p - base64_table);
+ bits += 6;
+ if (bits >= 8) {
+ bits -= 8;
+ *bp++ = (u8)(ac >> bits);
+ }
+ }
+ if (ac & ((1 << bits) - 1))
+ return -1;
+ return bp - dst;
+}
+EXPORT_SYMBOL_GPL(base64_decode);
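
A round-trip sketch of the two helpers above (the demo function name is illustrative). Buffer sizing is the caller's job: with '='-padding the encoded form is 4 * DIV_ROUND_UP(srclen, 3) bytes, and it is not NUL-terminated:

#include <linux/base64.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

static int base64_roundtrip_demo(void)
{
	static const u8 raw[] = { 0xde, 0xad, 0xbe, 0xef };
	char enc[4 * DIV_ROUND_UP(sizeof(raw), 3)];	/* 8 chars, no NUL */
	u8 dec[sizeof(raw) + 2];			/* worst case for 8 chars */
	int enc_len, dec_len;

	enc_len = base64_encode(raw, sizeof(raw), enc);	/* "3q2+7w==" */
	dec_len = base64_decode(enc, enc_len, dec);	/* 4, or -1 on bad input */

	if (dec_len != sizeof(raw) || memcmp(dec, raw, dec_len))
		return -EINVAL;
	return 0;
}
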
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 2082af43d51f..9ff549f63540 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -121,6 +121,9 @@ config CRYPTO_LIB_CHACHA20POLY1305
select CRYPTO_LIB_POLY1305
select CRYPTO_ALGAPI
+config CRYPTO_LIB_SHA1
+ tristate
+
config CRYPTO_LIB_SHA256
tristate
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 26be2bbe09c5..919cbb2c220d 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -34,6 +34,9 @@ libpoly1305-y := poly1305-donna32.o
libpoly1305-$(CONFIG_ARCH_SUPPORTS_INT128) := poly1305-donna64.o
libpoly1305-y += poly1305.o
+obj-$(CONFIG_CRYPTO_LIB_SHA1) += libsha1.o
+libsha1-y := sha1.o
+
obj-$(CONFIG_CRYPTO_LIB_SHA256) += libsha256.o
libsha256-y := sha256.o
diff --git a/lib/crypto/blake2s-selftest.c b/lib/crypto/blake2s-selftest.c
index 409e4b728770..7d77dea15587 100644
--- a/lib/crypto/blake2s-selftest.c
+++ b/lib/crypto/blake2s-selftest.c
@@ -4,6 +4,8 @@
*/
#include <crypto/internal/blake2s.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
#include <linux/string.h>
/*
@@ -587,5 +589,44 @@ bool __init blake2s_selftest(void)
}
}
+ for (i = 0; i < 32; ++i) {
+ enum { TEST_ALIGNMENT = 16 };
+ u8 unaligned_block[BLAKE2S_BLOCK_SIZE + TEST_ALIGNMENT - 1]
+ __aligned(TEST_ALIGNMENT);
+ u8 blocks[BLAKE2S_BLOCK_SIZE * 2];
+ struct blake2s_state state1, state2;
+
+ get_random_bytes(blocks, sizeof(blocks));
+ get_random_bytes(&state, sizeof(state));
+
+#if defined(CONFIG_CRYPTO_LIB_BLAKE2S_GENERIC) && \
+ defined(CONFIG_CRYPTO_ARCH_HAVE_LIB_BLAKE2S)
+ memcpy(&state1, &state, sizeof(state1));
+ memcpy(&state2, &state, sizeof(state2));
+ blake2s_compress(&state1, blocks, 2, BLAKE2S_BLOCK_SIZE);
+ blake2s_compress_generic(&state2, blocks, 2, BLAKE2S_BLOCK_SIZE);
+ if (memcmp(&state1, &state2, sizeof(state1))) {
+ pr_err("blake2s random compress self-test %d: FAIL\n",
+ i + 1);
+ success = false;
+ }
+#endif
+
+ memcpy(&state1, &state, sizeof(state1));
+ blake2s_compress(&state1, blocks, 1, BLAKE2S_BLOCK_SIZE);
+ for (l = 1; l < TEST_ALIGNMENT; ++l) {
+ memcpy(unaligned_block + l, blocks,
+ BLAKE2S_BLOCK_SIZE);
+ memcpy(&state2, &state, sizeof(state2));
+ blake2s_compress(&state2, unaligned_block + l, 1,
+ BLAKE2S_BLOCK_SIZE);
+ if (memcmp(&state1, &state2, sizeof(state1))) {
+ pr_err("blake2s random compress align %d self-test %d: FAIL\n",
+ l, i + 1);
+ success = false;
+ }
+ }
+ }
+
return success;
}
diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c
index c71c09621c09..98e688c6d891 100644
--- a/lib/crypto/blake2s.c
+++ b/lib/crypto/blake2s.c
@@ -16,16 +16,44 @@
#include <linux/init.h>
#include <linux/bug.h>
+static inline void blake2s_set_lastblock(struct blake2s_state *state)
+{
+ state->f[0] = -1;
+}
+
void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen)
{
- __blake2s_update(state, in, inlen, false);
+ const size_t fill = BLAKE2S_BLOCK_SIZE - state->buflen;
+
+ if (unlikely(!inlen))
+ return;
+ if (inlen > fill) {
+ memcpy(state->buf + state->buflen, in, fill);
+ blake2s_compress(state, state->buf, 1, BLAKE2S_BLOCK_SIZE);
+ state->buflen = 0;
+ in += fill;
+ inlen -= fill;
+ }
+ if (inlen > BLAKE2S_BLOCK_SIZE) {
+ const size_t nblocks = DIV_ROUND_UP(inlen, BLAKE2S_BLOCK_SIZE);
+
+ blake2s_compress(state, in, nblocks - 1, BLAKE2S_BLOCK_SIZE);
+ in += BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ inlen -= BLAKE2S_BLOCK_SIZE * (nblocks - 1);
+ }
+ memcpy(state->buf + state->buflen, in, inlen);
+ state->buflen += inlen;
}
EXPORT_SYMBOL(blake2s_update);
void blake2s_final(struct blake2s_state *state, u8 *out)
{
WARN_ON(IS_ENABLED(DEBUG) && !out);
- __blake2s_final(state, out, false);
+ blake2s_set_lastblock(state);
+ memset(state->buf + state->buflen, 0,
+ BLAKE2S_BLOCK_SIZE - state->buflen); /* Padding */
+ blake2s_compress(state, state->buf, 1, state->buflen);
+ cpu_to_le32_array(state->h, ARRAY_SIZE(state->h));
+ memcpy(out, state->h, state->outlen);
memzero_explicit(state, sizeof(*state));
}
EXPORT_SYMBOL(blake2s_final);
@@ -38,12 +66,7 @@ static int __init blake2s_mod_init(void)
return 0;
}
-static void __exit blake2s_mod_exit(void)
-{
-}
-
module_init(blake2s_mod_init);
-module_exit(blake2s_mod_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("BLAKE2s hash function");
MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>");
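
The rewritten blake2s_update()/blake2s_final() above keep the usual streaming contract: update() buffers partial blocks and compresses full ones, final() pads the tail, emits the digest, and wipes the state. A short usage sketch; blake2s_init() and BLAKE2S_HASH_SIZE come from <crypto/blake2s.h>, which this diff doesn't touch:

#include <crypto/blake2s.h>

static void blake2s_digest_demo(const u8 *msg, size_t len,
				u8 hash[BLAKE2S_HASH_SIZE])
{
	struct blake2s_state state;

	blake2s_init(&state, BLAKE2S_HASH_SIZE);	/* unkeyed */
	blake2s_update(&state, msg, len);	/* buffers partial blocks */
	blake2s_final(&state, hash);	/* pads, compresses, wipes state */
}
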
diff --git a/lib/sha1.c b/lib/crypto/sha1.c
index 0494766fc574..1aebe7be9401 100644
--- a/lib/sha1.c
+++ b/lib/crypto/sha1.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/export.h>
+#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/string.h>
#include <crypto/sha1.h>
@@ -135,3 +136,5 @@ void sha1_init(__u32 *buf)
buf[4] = 0xc3d2e1f0;
}
EXPORT_SYMBOL(sha1_init);
+
+MODULE_LICENSE("GPL");
diff --git a/lib/idr.c b/lib/idr.c
index f4ab4f4aa3c7..7ecdfdb5309e 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -491,7 +491,8 @@ void ida_free(struct ida *ida, unsigned int id)
struct ida_bitmap *bitmap;
unsigned long flags;
- BUG_ON((int)id < 0);
+ if ((int)id < 0)
+ return;
xas_lock_irqsave(&xas, flags);
bitmap = xas_load(&xas);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 0b64695ab632..0e0be334dbee 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -168,174 +168,6 @@ static int copyin(void *to, const void __user *from, size_t n)
return n;
}
-static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
-{
- size_t skip, copy, left, wanted;
- const struct iovec *iov;
- char __user *buf;
- void *kaddr, *from;
-
- if (unlikely(bytes > i->count))
- bytes = i->count;
-
- if (unlikely(!bytes))
- return 0;
-
- might_fault();
- wanted = bytes;
- iov = i->iov;
- skip = i->iov_offset;
- buf = iov->iov_base + skip;
- copy = min(bytes, iov->iov_len - skip);
-
- if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_writeable(buf, copy)) {
- kaddr = kmap_atomic(page);
- from = kaddr + offset;
-
- /* first chunk, usually the only one */
- left = copyout(buf, from, copy);
- copy -= left;
- skip += copy;
- from += copy;
- bytes -= copy;
-
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyout(buf, from, copy);
- copy -= left;
- skip = copy;
- from += copy;
- bytes -= copy;
- }
- if (likely(!bytes)) {
- kunmap_atomic(kaddr);
- goto done;
- }
- offset = from - kaddr;
- buf += copy;
- kunmap_atomic(kaddr);
- copy = min(bytes, iov->iov_len - skip);
- }
- /* Too bad - revert to non-atomic kmap */
-
- kaddr = kmap(page);
- from = kaddr + offset;
- left = copyout(buf, from, copy);
- copy -= left;
- skip += copy;
- from += copy;
- bytes -= copy;
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyout(buf, from, copy);
- copy -= left;
- skip = copy;
- from += copy;
- bytes -= copy;
- }
- kunmap(page);
-
-done:
- if (skip == iov->iov_len) {
- iov++;
- skip = 0;
- }
- i->count -= wanted - bytes;
- i->nr_segs -= iov - i->iov;
- i->iov = iov;
- i->iov_offset = skip;
- return wanted - bytes;
-}
-
-static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t bytes,
- struct iov_iter *i)
-{
- size_t skip, copy, left, wanted;
- const struct iovec *iov;
- char __user *buf;
- void *kaddr, *to;
-
- if (unlikely(bytes > i->count))
- bytes = i->count;
-
- if (unlikely(!bytes))
- return 0;
-
- might_fault();
- wanted = bytes;
- iov = i->iov;
- skip = i->iov_offset;
- buf = iov->iov_base + skip;
- copy = min(bytes, iov->iov_len - skip);
-
- if (IS_ENABLED(CONFIG_HIGHMEM) && !fault_in_readable(buf, copy)) {
- kaddr = kmap_atomic(page);
- to = kaddr + offset;
-
- /* first chunk, usually the only one */
- left = copyin(to, buf, copy);
- copy -= left;
- skip += copy;
- to += copy;
- bytes -= copy;
-
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyin(to, buf, copy);
- copy -= left;
- skip = copy;
- to += copy;
- bytes -= copy;
- }
- if (likely(!bytes)) {
- kunmap_atomic(kaddr);
- goto done;
- }
- offset = to - kaddr;
- buf += copy;
- kunmap_atomic(kaddr);
- copy = min(bytes, iov->iov_len - skip);
- }
- /* Too bad - revert to non-atomic kmap */
-
- kaddr = kmap(page);
- to = kaddr + offset;
- left = copyin(to, buf, copy);
- copy -= left;
- skip += copy;
- to += copy;
- bytes -= copy;
- while (unlikely(!left && bytes)) {
- iov++;
- buf = iov->iov_base;
- copy = min(bytes, iov->iov_len);
- left = copyin(to, buf, copy);
- copy -= left;
- skip = copy;
- to += copy;
- bytes -= copy;
- }
- kunmap(page);
-
-done:
- if (skip == iov->iov_len) {
- iov++;
- skip = 0;
- }
- i->count -= wanted - bytes;
- i->nr_segs -= iov - i->iov;
- i->iov = iov;
- i->iov_offset = skip;
- return wanted - bytes;
-}
-
#ifdef PIPE_PARANOIA
static bool sanity(const struct iov_iter *i)
{
@@ -689,6 +521,7 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_mask = pipe->ring_size - 1;
unsigned int i_head;
+ unsigned int valid = pipe->head;
size_t n, off, xfer = 0;
if (!sanity(i))
@@ -702,11 +535,17 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
rem = copy_mc_to_kernel(p + off, addr + xfer, chunk);
chunk -= rem;
kunmap_local(p);
- i->head = i_head;
- i->iov_offset = off + chunk;
- xfer += chunk;
- if (rem)
+ if (chunk) {
+ i->head = i_head;
+ i->iov_offset = off + chunk;
+ xfer += chunk;
+ valid = i_head + 1;
+ }
+ if (rem) {
+ pipe->bufs[i_head & p_mask].len -= rem;
+ pipe_discard_from(pipe, valid);
break;
+ }
n -= chunk;
off = 0;
i_head++;
@@ -848,24 +687,14 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
- if (likely(iter_is_iovec(i)))
- return copy_page_to_iter_iovec(page, offset, bytes, i);
- if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+ if (unlikely(iov_iter_is_pipe(i))) {
+ return copy_page_to_iter_pipe(page, offset, bytes, i);
+ } else {
void *kaddr = kmap_local_page(page);
size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
kunmap_local(kaddr);
return wanted;
}
- if (iov_iter_is_pipe(i))
- return copy_page_to_iter_pipe(page, offset, bytes, i);
- if (unlikely(iov_iter_is_discard(i))) {
- if (unlikely(i->count < bytes))
- bytes = i->count;
- i->count -= bytes;
- return bytes;
- }
- WARN_ON(1);
- return 0;
}
size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
@@ -896,17 +725,12 @@ EXPORT_SYMBOL(copy_page_to_iter);
size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
struct iov_iter *i)
{
- if (unlikely(!page_copy_sane(page, offset, bytes)))
- return 0;
- if (likely(iter_is_iovec(i)))
- return copy_page_from_iter_iovec(page, offset, bytes, i);
- if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
+ if (page_copy_sane(page, offset, bytes)) {
void *kaddr = kmap_local_page(page);
size_t wanted = _copy_from_iter(kaddr + offset, bytes, i);
kunmap_local(kaddr);
return wanted;
}
- WARN_ON(1);
return 0;
}
EXPORT_SYMBOL(copy_page_from_iter);
@@ -1029,17 +853,22 @@ static void pipe_advance(struct iov_iter *i, size_t size)
static void iov_iter_bvec_advance(struct iov_iter *i, size_t size)
{
- struct bvec_iter bi;
+ const struct bio_vec *bvec, *end;
- bi.bi_size = i->count;
- bi.bi_bvec_done = i->iov_offset;
- bi.bi_idx = 0;
- bvec_iter_advance(i->bvec, &bi, size);
+ if (!i->count)
+ return;
+ i->count -= size;
+
+ size += i->iov_offset;
- i->bvec += bi.bi_idx;
- i->nr_segs -= bi.bi_idx;
- i->count = bi.bi_size;
- i->iov_offset = bi.bi_bvec_done;
+ for (bvec = i->bvec, end = bvec + i->nr_segs; bvec < end; bvec++) {
+ if (likely(size < bvec->bv_len))
+ break;
+ size -= bvec->bv_len;
+ }
+ i->iov_offset = size;
+ i->nr_segs -= bvec - i->bvec;
+ i->bvec = bvec;
}
static void iov_iter_iovec_advance(struct iov_iter *i, size_t size)
@@ -1268,6 +1097,98 @@ void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count)
}
EXPORT_SYMBOL(iov_iter_discard);
+static bool iov_iter_aligned_iovec(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
+{
+ size_t size = i->count;
+ size_t skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->iov[k].iov_len - skip;
+
+ if (len > size)
+ len = size;
+ if (len & len_mask)
+ return false;
+ if ((unsigned long)(i->iov[k].iov_base + skip) & addr_mask)
+ return false;
+
+ size -= len;
+ if (!size)
+ break;
+ }
+ return true;
+}
+
+static bool iov_iter_aligned_bvec(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
+{
+ size_t size = i->count;
+ unsigned skip = i->iov_offset;
+ unsigned k;
+
+ for (k = 0; k < i->nr_segs; k++, skip = 0) {
+ size_t len = i->bvec[k].bv_len - skip;
+
+ if (len > size)
+ len = size;
+ if (len & len_mask)
+ return false;
+ if ((unsigned long)(i->bvec[k].bv_offset + skip) & addr_mask)
+ return false;
+
+ size -= len;
+ if (!size)
+ break;
+ }
+ return true;
+}
+
+/**
+ * iov_iter_is_aligned() - Check that the address and length of each segment
+ * are aligned to the given masks.
+ *
+ * @i: &struct iov_iter to check
+ * @addr_mask: bit mask to check against each segment's address
+ * @len_mask: bit mask to check against each segment's length
+ *
+ * Return: true if every segment's address and length satisfy both masks,
+ * false otherwise
+ */
+bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask,
+ unsigned len_mask)
+{
+ if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
+ return iov_iter_aligned_iovec(i, addr_mask, len_mask);
+
+ if (iov_iter_is_bvec(i))
+ return iov_iter_aligned_bvec(i, addr_mask, len_mask);
+
+ if (iov_iter_is_pipe(i)) {
+ unsigned int p_mask = i->pipe->ring_size - 1;
+ size_t size = i->count;
+
+ if (size & len_mask)
+ return false;
+ if (size && allocated(&i->pipe->bufs[i->head & p_mask])) {
+ if (i->iov_offset & addr_mask)
+ return false;
+ }
+
+ return true;
+ }
+
+ if (iov_iter_is_xarray(i)) {
+ if (i->count & len_mask)
+ return false;
+ if ((i->xarray_start + i->iov_offset) & addr_mask)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(iov_iter_is_aligned);
+
static unsigned long iov_iter_alignment_iovec(const struct iov_iter *i)
{
unsigned long res = 0;
@@ -1465,47 +1386,36 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i,
}
/* must be done on non-empty ITER_IOVEC one */
-static unsigned long first_iovec_segment(const struct iov_iter *i,
- size_t *size, size_t *start,
- size_t maxsize, unsigned maxpages)
+static unsigned long first_iovec_segment(const struct iov_iter *i, size_t *size)
{
size_t skip;
long k;
for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
- unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
size_t len = i->iov[k].iov_len - skip;
if (unlikely(!len))
continue;
- if (len > maxsize)
- len = maxsize;
- len += (*start = addr % PAGE_SIZE);
- if (len > maxpages * PAGE_SIZE)
- len = maxpages * PAGE_SIZE;
- *size = len;
- return addr & PAGE_MASK;
+ if (*size > len)
+ *size = len;
+ return (unsigned long)i->iov[k].iov_base + skip;
}
BUG(); // if it had been empty, we wouldn't get called
}
/* must be done on non-empty ITER_BVEC one */
static struct page *first_bvec_segment(const struct iov_iter *i,
- size_t *size, size_t *start,
- size_t maxsize, unsigned maxpages)
+ size_t *size, size_t *start)
{
struct page *page;
size_t skip = i->iov_offset, len;
len = i->bvec->bv_len - skip;
- if (len > maxsize)
- len = maxsize;
+ if (*size > len)
+ *size = len;
skip += i->bvec->bv_offset;
page = i->bvec->bv_page + skip / PAGE_SIZE;
- len += (*start = skip % PAGE_SIZE);
- if (len > maxpages * PAGE_SIZE)
- len = maxpages * PAGE_SIZE;
- *size = len;
+ *start = skip % PAGE_SIZE;
return page;
}
@@ -1513,13 +1423,14 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
struct page **pages, size_t maxsize, unsigned maxpages,
size_t *start)
{
- size_t len;
int n, res;
if (maxsize > i->count)
maxsize = i->count;
if (!maxsize)
return 0;
+ if (maxsize > MAX_RW_COUNT)
+ maxsize = MAX_RW_COUNT;
if (likely(iter_is_iovec(i))) {
unsigned int gup_flags = 0;
@@ -1530,21 +1441,27 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
if (i->nofault)
gup_flags |= FOLL_NOFAULT;
- addr = first_iovec_segment(i, &len, start, maxsize, maxpages);
- n = DIV_ROUND_UP(len, PAGE_SIZE);
+ addr = first_iovec_segment(i, &maxsize);
+ *start = addr % PAGE_SIZE;
+ addr &= PAGE_MASK;
+ n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
+ if (n > maxpages)
+ n = maxpages;
res = get_user_pages_fast(addr, n, gup_flags, pages);
if (unlikely(res <= 0))
return res;
- return (res == n ? len : res * PAGE_SIZE) - *start;
+ return min_t(size_t, maxsize, res * PAGE_SIZE - *start);
}
if (iov_iter_is_bvec(i)) {
struct page *page;
- page = first_bvec_segment(i, &len, start, maxsize, maxpages);
- n = DIV_ROUND_UP(len, PAGE_SIZE);
- while (n--)
+ page = first_bvec_segment(i, &maxsize, start);
+ n = DIV_ROUND_UP(maxsize + *start, PAGE_SIZE);
+ if (n > maxpages)
+ n = maxpages;
+ for (int k = 0; k < n; k++)
get_page(*pages++ = page++);
- return len - *start;
+ return min_t(size_t, maxsize, n * PAGE_SIZE - *start);
}
if (iov_iter_is_pipe(i))
return pipe_get_pages(i, pages, maxsize, maxpages, start);
@@ -1633,13 +1550,14 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
size_t *start)
{
struct page **p;
- size_t len;
int n, res;
if (maxsize > i->count)
maxsize = i->count;
if (!maxsize)
return 0;
+ if (maxsize > MAX_RW_COUNT)
+ maxsize = MAX_RW_COUNT;
if (likely(iter_is_iovec(i))) {
unsigned int gup_flags = 0;
@@ -