Diffstat (limited to 'lib')
-rw-r--r-- | lib/Kconfig | 4
-rw-r--r-- | lib/Kconfig.debug | 35
-rw-r--r-- | lib/Makefile | 5
-rw-r--r-- | lib/btree.c | 1
-rw-r--r-- | lib/buildid.c | 2
-rw-r--r-- | lib/cpu_rmap.c | 57
-rw-r--r-- | lib/crypto/blake2s-generic.c | 5
-rw-r--r-- | lib/crypto/blake2s.c | 1
-rw-r--r-- | lib/crypto/utils.c | 2
-rw-r--r-- | lib/dynamic_debug.c | 51
-rw-r--r-- | lib/fault-inject.c | 191
-rw-r--r-- | lib/group_cpus.c | 1
-rw-r--r-- | lib/iov_iter.c | 189
-rw-r--r-- | lib/kobject.c | 34
-rw-r--r-- | lib/kunit/debugfs.c | 14
-rw-r--r-- | lib/kunit/kunit-test.c | 77
-rw-r--r-- | lib/kunit/test.c | 57
-rw-r--r-- | lib/libcrc32c.c | 6
-rw-r--r-- | lib/list-test.c | 300
-rw-r--r-- | lib/maple_tree.c | 144
-rw-r--r-- | lib/packing.c | 1
-rw-r--r-- | lib/pldmfw/pldmfw.c | 1
-rw-r--r-- | lib/rbtree.c | 2
-rw-r--r-- | lib/rcuref.c | 281
-rw-r--r-- | lib/seq_buf.c | 32
-rw-r--r-- | lib/show_mem.c | 19
-rw-r--r-- | lib/stackdepot.c | 12
-rw-r--r-- | lib/test-string_helpers.c | 2
-rw-r--r-- | lib/test_fprobe.c | 106
-rw-r--r-- | lib/test_printf.c | 26
-rw-r--r-- | lib/test_vmalloc.c | 39
-rw-r--r-- | lib/vdso/Makefile | 13
-rw-r--r-- | lib/vsprintf.c | 23
33 files changed, 1491 insertions, 242 deletions
diff --git a/lib/Kconfig b/lib/Kconfig index ce2abffb9ed8..5c2da561c516 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -92,6 +92,7 @@ config ARCH_USE_SYM_ANNOTATIONS config INDIRECT_PIO bool "Access I/O in non-MMIO mode" depends on ARM64 + depends on HAS_IOPORT help On some platforms where no separate I/O space exists, there are I/O hosts which can not be accessed in MMIO mode. Using the logical PIO @@ -509,6 +510,9 @@ config HAS_IOMEM depends on !NO_IOMEM default y +config HAS_IOPORT + bool + config HAS_IOPORT_MAP bool depends on HAS_IOMEM && !NO_IOPORT_MAP diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 39d1d93164bd..ce51d4dc6803 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -502,7 +502,7 @@ config SECTION_MISMATCH_WARN_ONLY config DEBUG_FORCE_FUNCTION_ALIGN_64B bool "Force all function address 64B aligned" - depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC) + depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC || S390) select FUNCTION_ALIGNMENT_64B help There are cases that a commit from one domain changes the function @@ -791,6 +791,16 @@ config DEBUG_VM If unsure, say N. +config DEBUG_VM_SHOOT_LAZIES + bool "Debug MMU_LAZY_TLB_SHOOTDOWN implementation" + depends on DEBUG_VM + depends on MMU_LAZY_TLB_SHOOTDOWN + help + Enable additional IPIs that ensure lazy tlb mm references are removed + before the mm is freed. + + If unsure, say N. + config DEBUG_VM_MAPLE_TREE bool "Debug VM maple trees" depends on DEBUG_VM @@ -1480,6 +1490,15 @@ config CSD_LOCK_WAIT_DEBUG include the IPI handler function currently executing (if any) and relevant stack traces. +config CSD_LOCK_WAIT_DEBUG_DEFAULT + bool "Default csd_lock_wait() debugging on at boot time" + depends on CSD_LOCK_WAIT_DEBUG + depends on 64BIT + default n + help + This option causes the csdlock_debug= kernel boot parameter to + default to 1 (basic debugging) instead of 0 (no debugging). + endmenu # lock debugging config TRACE_IRQFLAGS @@ -1958,9 +1977,21 @@ config FAIL_SUNRPC Provide fault-injection capability for SunRPC and its consumers. +config FAULT_INJECTION_CONFIGFS + bool "Configfs interface for fault-injection capabilities" + depends on FAULT_INJECTION + select CONFIGFS_FS + help + This option allows configfs-based drivers to dynamically configure + fault-injection via configfs. Each parameter for driver-specific + fault-injection can be made visible as a configfs attribute in a + configfs group. 
+ + config FAULT_INJECTION_STACKTRACE_FILTER bool "stacktrace filter for fault-injection capabilities" - depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT + depends on FAULT_INJECTION + depends on (FAULT_INJECTION_DEBUG_FS || FAULT_INJECTION_CONFIGFS) && STACKTRACE_SUPPORT select STACKTRACE depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86 help diff --git a/lib/Makefile b/lib/Makefile index baf2821f7a00..876fcdeae34e 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -47,7 +47,7 @@ obj-y += bcd.o sort.o parser.o debug_locks.o random32.o \ list_sort.o uuid.o iov_iter.o clz_ctz.o \ bsearch.o find_bit.o llist.o memweight.o kfifo.o \ percpu-refcount.o rhashtable.o base64.o \ - once.o refcount.o usercopy.o errseq.o bucket_locks.o \ + once.o refcount.o rcuref.o usercopy.o errseq.o bucket_locks.o \ generic-radix-tree.o obj-$(CONFIG_STRING_SELFTEST) += test_string.o obj-y += string_helpers.o @@ -231,6 +231,9 @@ lib-$(CONFIG_GENERIC_BUG) += bug.o obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o +#ensure exported functions have prototypes +CFLAGS_dynamic_debug.o := -DDYNAMIC_DEBUG_MODULE + obj-$(CONFIG_SYMBOLIC_ERRNAME) += errname.o obj-$(CONFIG_NLATTR) += nlattr.o diff --git a/lib/btree.c b/lib/btree.c index a82100c73b55..49420cae3a83 100644 --- a/lib/btree.c +++ b/lib/btree.c @@ -794,4 +794,3 @@ module_exit(btree_module_exit); MODULE_AUTHOR("Joern Engel <joern@logfs.org>"); MODULE_AUTHOR("Johannes Berg <johannes@sipsolutions.net>"); -MODULE_LICENSE("GPL"); diff --git a/lib/buildid.c b/lib/buildid.c index dfc62625cae4..e3a7acdeef0e 100644 --- a/lib/buildid.c +++ b/lib/buildid.c @@ -163,7 +163,7 @@ out: /** * build_id_parse_buf - Get build ID from a buffer - * @buf: Elf note section(s) to parse + * @buf: ELF note section(s) to parse * @buf_size: Size of @buf in bytes * @build_id: Build ID parsed from @buf, at least BUILD_ID_SIZE_MAX long * diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index f08d9c56f712..73c1636b927b 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c @@ -128,19 +128,31 @@ debug_print_rmap(const struct cpu_rmap *rmap, const char *prefix) } #endif +static int get_free_index(struct cpu_rmap *rmap) +{ + int i; + + for (i = 0; i < rmap->size; i++) + if (!rmap->obj[i]) + return i; + + return -ENOSPC; +} + /** * cpu_rmap_add - add object to a rmap * @rmap: CPU rmap allocated with alloc_cpu_rmap() * @obj: Object to add to rmap * - * Return index of object. 
+ * Return index of object or -ENOSPC if no free entry was found */ int cpu_rmap_add(struct cpu_rmap *rmap, void *obj) { - u16 index; + int index = get_free_index(rmap); + + if (index < 0) + return index; - BUG_ON(rmap->used >= rmap->size); - index = rmap->used++; rmap->obj[index] = obj; return index; } @@ -230,9 +242,10 @@ void free_irq_cpu_rmap(struct cpu_rmap *rmap) if (!rmap) return; - for (index = 0; index < rmap->used; index++) { + for (index = 0; index < rmap->size; index++) { glue = rmap->obj[index]; - irq_set_affinity_notifier(glue->notify.irq, NULL); + if (glue) + irq_set_affinity_notifier(glue->notify.irq, NULL); } cpu_rmap_put(rmap); @@ -268,10 +281,22 @@ static void irq_cpu_rmap_release(struct kref *ref) container_of(ref, struct irq_glue, notify.kref); cpu_rmap_put(glue->rmap); + glue->rmap->obj[glue->index] = NULL; kfree(glue); } /** + * irq_cpu_rmap_remove - remove an IRQ from a CPU affinity reverse-map + * @rmap: The reverse-map + * @irq: The IRQ number + */ +int irq_cpu_rmap_remove(struct cpu_rmap *rmap, int irq) +{ + return irq_set_affinity_notifier(irq, NULL); +} +EXPORT_SYMBOL(irq_cpu_rmap_remove); + +/** * irq_cpu_rmap_add - add an IRQ to a CPU affinity reverse-map * @rmap: The reverse-map * @irq: The IRQ number @@ -293,12 +318,22 @@ int irq_cpu_rmap_add(struct cpu_rmap *rmap, int irq) glue->notify.release = irq_cpu_rmap_release; glue->rmap = rmap; cpu_rmap_get(rmap); - glue->index = cpu_rmap_add(rmap, glue); + rc = cpu_rmap_add(rmap, glue); + if (rc < 0) + goto err_add; + + glue->index = rc; rc = irq_set_affinity_notifier(irq, &glue->notify); - if (rc) { - cpu_rmap_put(glue->rmap); - kfree(glue); - } + if (rc) + goto err_set; + + return rc; + +err_set: + rmap->obj[glue->index] = NULL; +err_add: + cpu_rmap_put(glue->rmap); + kfree(glue); return rc; } EXPORT_SYMBOL(irq_cpu_rmap_add); diff --git a/lib/crypto/blake2s-generic.c b/lib/crypto/blake2s-generic.c index 75ccb3e633e6..3b6dcfdd9628 100644 --- a/lib/crypto/blake2s-generic.c +++ b/lib/crypto/blake2s-generic.c @@ -12,7 +12,6 @@ #include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> -#include <linux/module.h> #include <linux/init.h> #include <linux/bug.h> #include <asm/unaligned.h> @@ -109,7 +108,3 @@ void blake2s_compress_generic(struct blake2s_state *state, const u8 *block, } EXPORT_SYMBOL(blake2s_compress_generic); - -MODULE_LICENSE("GPL v2"); -MODULE_DESCRIPTION("BLAKE2s hash function"); -MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); diff --git a/lib/crypto/blake2s.c b/lib/crypto/blake2s.c index 98e688c6d891..71a316552cc5 100644 --- a/lib/crypto/blake2s.c +++ b/lib/crypto/blake2s.c @@ -67,6 +67,5 @@ static int __init blake2s_mod_init(void) } module_init(blake2s_mod_init); -MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("BLAKE2s hash function"); MODULE_AUTHOR("Jason A. Donenfeld <Jason@zx2c4.com>"); diff --git a/lib/crypto/utils.c b/lib/crypto/utils.c index 53230ab1b195..c852c7151b0a 100644 --- a/lib/crypto/utils.c +++ b/lib/crypto/utils.c @@ -6,7 +6,7 @@ */ #include <asm/unaligned.h> -#include <crypto/algapi.h> +#include <crypto/utils.h> #include <linux/module.h> /* diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 009f2ead09c1..fdd6d9800a70 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -1223,8 +1223,7 @@ static void ddebug_attach_module_classes(struct ddebug_table *dt, * Allocate a new ddebug_table for the given module * and add it to the global list. 
*/ -static int __ddebug_add_module(struct _ddebug_info *di, unsigned int base, - const char *modname) +static int ddebug_add_module(struct _ddebug_info *di, const char *modname) { struct ddebug_table *dt; @@ -1263,11 +1262,6 @@ static int __ddebug_add_module(struct _ddebug_info *di, unsigned int base, return 0; } -int ddebug_add_module(struct _ddebug_info *di, const char *modname) -{ - return __ddebug_add_module(di, 0, modname); -} - /* helper for ddebug_dyndbg_(boot|module)_param_cb */ static int ddebug_dyndbg_param_cb(char *param, char *val, const char *modname, int on_err) @@ -1314,11 +1308,13 @@ static void ddebug_table_free(struct ddebug_table *dt) kfree(dt); } +#ifdef CONFIG_MODULES + /* * Called in response to a module being unloaded. Removes * any ddebug_table's which point at the module. */ -int ddebug_remove_module(const char *mod_name) +static int ddebug_remove_module(const char *mod_name) { struct ddebug_table *dt, *nextdt; int ret = -ENOENT; @@ -1337,6 +1333,33 @@ int ddebug_remove_module(const char *mod_name) return ret; } +static int ddebug_module_notify(struct notifier_block *self, unsigned long val, + void *data) +{ + struct module *mod = data; + int ret = 0; + + switch (val) { + case MODULE_STATE_COMING: + ret = ddebug_add_module(&mod->dyndbg_info, mod->name); + if (ret) + WARN(1, "Failed to allocate memory: dyndbg may not work properly.\n"); + break; + case MODULE_STATE_GOING: + ddebug_remove_module(mod->name); + break; + } + + return notifier_from_errno(ret); +} + +static struct notifier_block ddebug_module_nb = { + .notifier_call = ddebug_module_notify, + .priority = 0, /* dynamic debug depends on jump label */ +}; + +#endif /* CONFIG_MODULES */ + static void ddebug_remove_all_tables(void) { mutex_lock(&ddebug_lock); @@ -1388,6 +1411,14 @@ static int __init dynamic_debug_init(void) .num_classes = __stop___dyndbg_classes - __start___dyndbg_classes, }; +#ifdef CONFIG_MODULES + ret = register_module_notifier(&ddebug_module_nb); + if (ret) { + pr_warn("Failed to register dynamic debug module notifier\n"); + return ret; + } +#endif /* CONFIG_MODULES */ + if (&__start___dyndbg == &__stop___dyndbg) { if (IS_ENABLED(CONFIG_DYNAMIC_DEBUG)) { pr_warn("_ddebug table is empty in a CONFIG_DYNAMIC_DEBUG build\n"); @@ -1408,7 +1439,7 @@ static int __init dynamic_debug_init(void) mod_ct++; di.num_descs = mod_sites; di.descs = iter_mod_start; - ret = __ddebug_add_module(&di, i - mod_sites, modname); + ret = ddebug_add_module(&di, modname); if (ret) goto out_err; @@ -1419,7 +1450,7 @@ static int __init dynamic_debug_init(void) } di.num_descs = mod_sites; di.descs = iter_mod_start; - ret = __ddebug_add_module(&di, i - mod_sites, modname); + ret = ddebug_add_module(&di, modname); if (ret) goto out_err; diff --git a/lib/fault-inject.c b/lib/fault-inject.c index 6cff320c4eb4..d608f9b48c10 100644 --- a/lib/fault-inject.c +++ b/lib/fault-inject.c @@ -244,3 +244,194 @@ struct dentry *fault_create_debugfs_attr(const char *name, EXPORT_SYMBOL_GPL(fault_create_debugfs_attr); #endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ + +#ifdef CONFIG_FAULT_INJECTION_CONFIGFS + +/* These configfs attribute utilities are copied from drivers/block/null_blk/main.c */ + +static ssize_t fault_uint_attr_show(unsigned int val, char *page) +{ + return snprintf(page, PAGE_SIZE, "%u\n", val); +} + +static ssize_t fault_ulong_attr_show(unsigned long val, char *page) +{ + return snprintf(page, PAGE_SIZE, "%lu\n", val); +} + +static ssize_t fault_bool_attr_show(bool val, char *page) +{ + return snprintf(page, PAGE_SIZE, 
"%u\n", val); +} + +static ssize_t fault_atomic_t_attr_show(atomic_t val, char *page) +{ + return snprintf(page, PAGE_SIZE, "%d\n", atomic_read(&val)); +} + +static ssize_t fault_uint_attr_store(unsigned int *val, const char *page, size_t count) +{ + unsigned int tmp; + int result; + + result = kstrtouint(page, 0, &tmp); + if (result < 0) + return result; + + *val = tmp; + return count; +} + +static ssize_t fault_ulong_attr_store(unsigned long *val, const char *page, size_t count) +{ + int result; + unsigned long tmp; + + result = kstrtoul(page, 0, &tmp); + if (result < 0) + return result; + + *val = tmp; + return count; +} + +static ssize_t fault_bool_attr_store(bool *val, const char *page, size_t count) +{ + bool tmp; + int result; + + result = kstrtobool(page, &tmp); + if (result < 0) + return result; + + *val = tmp; + return count; +} + +static ssize_t fault_atomic_t_attr_store(atomic_t *val, const char *page, size_t count) +{ + int tmp; + int result; + + result = kstrtoint(page, 0, &tmp); + if (result < 0) + return result; + + atomic_set(val, tmp); + return count; +} + +#define CONFIGFS_ATTR_NAMED(_pfx, _name, _attr_name) \ +static struct configfs_attribute _pfx##attr_##_name = { \ + .ca_name = _attr_name, \ + .ca_mode = 0644, \ + .ca_owner = THIS_MODULE, \ + .show = _pfx##_name##_show, \ + .store = _pfx##_name##_store, \ +} + +static struct fault_config *to_fault_config(struct config_item *item) +{ + return container_of(to_config_group(item), struct fault_config, group); +} + +#define FAULT_CONFIGFS_ATTR_NAMED(NAME, ATTR_NAME, MEMBER, TYPE) \ +static ssize_t fault_##NAME##_show(struct config_item *item, char *page) \ +{ \ + return fault_##TYPE##_attr_show(to_fault_config(item)->attr.MEMBER, page); \ +} \ +static ssize_t fault_##NAME##_store(struct config_item *item, const char *page, size_t count) \ +{ \ + struct fault_config *config = to_fault_config(item); \ + return fault_##TYPE##_attr_store(&config->attr.MEMBER, page, count); \ +} \ +CONFIGFS_ATTR_NAMED(fault_, NAME, ATTR_NAME) + +#define FAULT_CONFIGFS_ATTR(NAME, TYPE) \ + FAULT_CONFIGFS_ATTR_NAMED(NAME, __stringify(NAME), NAME, TYPE) + +FAULT_CONFIGFS_ATTR(probability, ulong); +FAULT_CONFIGFS_ATTR(interval, ulong); +FAULT_CONFIGFS_ATTR(times, atomic_t); +FAULT_CONFIGFS_ATTR(space, atomic_t); +FAULT_CONFIGFS_ATTR(verbose, ulong); +FAULT_CONFIGFS_ATTR_NAMED(ratelimit_interval, "verbose_ratelimit_interval_ms", + ratelimit_state.interval, uint); +FAULT_CONFIGFS_ATTR_NAMED(ratelimit_burst, "verbose_ratelimit_burst", + ratelimit_state.burst, uint); +FAULT_CONFIGFS_ATTR_NAMED(task_filter, "task-filter", task_filter, bool); + +#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER + +static ssize_t fault_stacktrace_depth_show(struct config_item *item, char *page) +{ + return fault_ulong_attr_show(to_fault_config(item)->attr.stacktrace_depth, page); +} + +static ssize_t fault_stacktrace_depth_store(struct config_item *item, const char *page, + size_t count) +{ + int result; + unsigned long tmp; + + result = kstrtoul(page, 0, &tmp); + if (result < 0) + return result; + + to_fault_config(item)->attr.stacktrace_depth = + min_t(unsigned long, tmp, MAX_STACK_TRACE_DEPTH); + + return count; +} + +CONFIGFS_ATTR_NAMED(fault_, stacktrace_depth, "stacktrace-depth"); + +static ssize_t fault_xul_attr_show(unsigned long val, char *page) +{ + return snprintf(page, PAGE_SIZE, + sizeof(val) == sizeof(u32) ? 
"0x%08lx\n" : "0x%016lx\n", val); +} + +static ssize_t fault_xul_attr_store(unsigned long *val, const char *page, size_t count) +{ + return fault_ulong_attr_store(val, page, count); +} + +FAULT_CONFIGFS_ATTR_NAMED(require_start, "require-start", require_start, xul); +FAULT_CONFIGFS_ATTR_NAMED(require_end, "require-end", require_end, xul); +FAULT_CONFIGFS_ATTR_NAMED(reject_start, "reject-start", reject_start, xul); +FAULT_CONFIGFS_ATTR_NAMED(reject_end, "reject-end", reject_end, xul); + +#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ + +static struct configfs_attribute *fault_config_attrs[] = { + &fault_attr_probability, + &fault_attr_interval, + &fault_attr_times, + &fault_attr_space, + &fault_attr_verbose, + &fault_attr_ratelimit_interval, + &fault_attr_ratelimit_burst, + &fault_attr_task_filter, +#ifdef CONFIG_FAULT_INJECTION_STACKTRACE_FILTER + &fault_attr_stacktrace_depth, + &fault_attr_require_start, + &fault_attr_require_end, + &fault_attr_reject_start, + &fault_attr_reject_end, +#endif /* CONFIG_FAULT_INJECTION_STACKTRACE_FILTER */ + NULL, +}; + +static const struct config_item_type fault_config_type = { + .ct_attrs = fault_config_attrs, + .ct_owner = THIS_MODULE, +}; + +void fault_config_init(struct fault_config *config, const char *name) +{ + config_group_init_type_name(&config->group, name, &fault_config_type); +} +EXPORT_SYMBOL_GPL(fault_config_init); + +#endif /* CONFIG_FAULT_INJECTION_CONFIGFS */ diff --git a/lib/group_cpus.c b/lib/group_cpus.c index 9c837a35fef7..aa3f6815bb12 100644 --- a/lib/group_cpus.c +++ b/lib/group_cpus.c @@ -426,3 +426,4 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps) return masks; } #endif /* CONFIG_SMP */ +EXPORT_SYMBOL_GPL(group_cpus_evenly); diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 274014e4eafe..960223ed9199 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -126,13 +126,13 @@ __out: \ iterate_buf(i, n, base, len, off, \ i->ubuf, (I)) \ } else if (likely(iter_is_iovec(i))) { \ - const struct iovec *iov = i->iov; \ + const struct iovec *iov = iter_iov(i); \ void __user *base; \ size_t len; \ iterate_iovec(i, n, base, len, off, \ iov, (I)) \ - i->nr_segs -= iov - i->iov; \ - i->iov = iov; \ + i->nr_segs -= iov - iter_iov(i); \ + i->__iov = iov; \ } else if (iov_iter_is_bvec(i)) { \ const struct bio_vec *bvec = i->bvec; \ void *base; \ @@ -172,6 +172,18 @@ static int copyout(void __user *to, const void *from, size_t n) return n; } +static int copyout_nofault(void __user *to, const void *from, size_t n) +{ + long res; + + if (should_fail_usercopy()) + return n; + + res = copy_to_user_nofault(to, from, n); + + return res < 0 ? 
n : res; +} + static int copyin(void *to, const void __user *from, size_t n) { size_t res = n; @@ -355,7 +367,7 @@ size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size) size_t skip; size -= count; - for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { + for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { size_t len = min(count, p->iov_len - skip); size_t ret; @@ -398,7 +410,7 @@ size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size) size_t skip; size -= count; - for (p = i->iov, skip = i->iov_offset; count; p++, skip = 0) { + for (p = iter_iov(i), skip = i->iov_offset; count; p++, skip = 0) { size_t len = min(count, p->iov_len - skip); size_t ret; @@ -422,10 +434,11 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, WARN_ON(direction & ~(READ | WRITE)); *i = (struct iov_iter) { .iter_type = ITER_IOVEC, + .copy_mc = false, .nofault = false, .user_backed = true, .data_source = direction, - .iov = iov, + .__iov = iov, .nr_segs = nr_segs, .iov_offset = 0, .count = count @@ -618,6 +631,14 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) EXPORT_SYMBOL_GPL(_copy_mc_to_iter); #endif /* CONFIG_ARCH_HAS_COPY_MC */ +static void *memcpy_from_iter(struct iov_iter *i, void *to, const void *from, + size_t size) +{ + if (iov_iter_is_copy_mc(i)) + return (void *)copy_mc_to_kernel(to, from, size); + return memcpy(to, from, size); +} + size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(!i->data_source)) @@ -627,7 +648,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i) might_fault(); iterate_and_advance(i, bytes, base, len, off, copyin(addr + off, base, len), - memcpy(addr + off, base, len) + memcpy_from_iter(i, addr + off, base, len) ) return bytes; @@ -734,6 +755,42 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, } EXPORT_SYMBOL(copy_page_to_iter); +size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t bytes, + struct iov_iter *i) +{ + size_t res = 0; + + if (!page_copy_sane(page, offset, bytes)) + return 0; + if (WARN_ON_ONCE(i->data_source)) + return 0; + if (unlikely(iov_iter_is_pipe(i))) + return copy_page_to_iter_pipe(page, offset, bytes, i); + page += offset / PAGE_SIZE; // first subpage + offset %= PAGE_SIZE; + while (1) { + void *kaddr = kmap_local_page(page); + size_t n = min(bytes, (size_t)PAGE_SIZE - offset); + + iterate_and_advance(i, n, base, len, off, + copyout_nofault(base, kaddr + offset + off, len), + memcpy(base, kaddr + offset + off, len) + ) + kunmap_local(kaddr); + res += n; + bytes -= n; + if (!bytes || !n) + break; + offset += n; + if (offset == PAGE_SIZE) { + page++; + offset = 0; + } + } + return res; +} +EXPORT_SYMBOL(copy_page_to_iter_nofault); + size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, struct iov_iter *i) { @@ -814,7 +871,7 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt } iterate_and_advance(i, bytes, base, len, off, copyin(p + off, base, len), - memcpy(p + off, base, len) + memcpy_from_iter(i, p + off, base, len) ) kunmap_atomic(kaddr); return bytes; @@ -876,14 +933,14 @@ static void iov_iter_iovec_advance(struct iov_iter *i, size_t size) i->count -= size; size += i->iov_offset; // from beginning of current segment - for (iov = i->iov, end = iov + i->nr_segs; iov < end; iov++) { + for (iov = iter_iov(i), end = iov + i->nr_segs; iov < end; iov++) { if (likely(size < iov->iov_len)) break; size -= 
iov->iov_len; } i->iov_offset = size; - i->nr_segs -= iov - i->iov; - i->iov = iov; + i->nr_segs -= iov - iter_iov(i); + i->__iov = iov; } void iov_iter_advance(struct iov_iter *i, size_t size) @@ -958,12 +1015,12 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll) unroll -= n; } } else { /* same logics for iovec and kvec */ - const struct iovec *iov = i->iov; + const struct iovec *iov = iter_iov(i); while (1) { size_t n = (--iov)->iov_len; i->nr_segs++; if (unroll <= n) { - i->iov = iov; + i->__iov = iov; i->iov_offset = n - unroll; return; } @@ -980,7 +1037,7 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i) { if (i->nr_segs > 1) { if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) - return min(i->count, i->iov->iov_len - i->iov_offset); + return min(i->count, iter_iov(i)->iov_len - i->iov_offset); if (iov_iter_is_bvec(i)) return min(i->count, i->bvec->bv_len - i->iov_offset); } |
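
Note: the lib/fault-inject.c hunks above only add the configfs plumbing; a driver still has to expose a fault_config group and consult the attributes at its fault point. The following is a minimal, hypothetical sketch (the example_* names are invented for illustration), assuming CONFIG_FAULT_INJECTION_CONFIGFS is enabled and that struct fault_config pairs a fault_attr with a config_group, as the code above implies:

#include <linux/configfs.h>
#include <linux/errno.h>
#include <linux/fault-inject.h>
#include <linux/types.h>

static struct fault_config example_fault_config;	/* hypothetical driver state */

/*
 * Expose a "fault_inject" child group (probability, interval, times, ...)
 * under one of the driver's own configfs groups.  Must be called before
 * the parent group is registered/made visible, since default groups are
 * only instantiated at registration time.
 */
static void example_add_fault_group(struct config_group *parent)
{
	fault_config_init(&example_fault_config, "fault_inject");
	configfs_add_default_group(&example_fault_config.group, parent);
}

/* At the fault point, let the configured attributes decide whether to fail. */
static int example_submit_io(size_t len)
{
	if (should_fail(&example_fault_config.attr, len))
		return -EIO;
	return 0;
}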