Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                              |  16
-rw-r--r--  lib/Kconfig.debug                        |  17
-rw-r--r--  lib/Makefile                             |   8
-rw-r--r--  lib/bitmap.c                             |  11
-rw-r--r--  lib/btree.c                              |  30
-rw-r--r--  lib/cpumask.c                            |  99
-rw-r--r--  lib/devres.c                             |  15
-rw-r--r--  lib/error-inject.c                       |  28
-rw-r--r--  lib/flex_proportions.c                   |  10
-rw-r--r--  lib/list_debug.c                         |  12
-rw-r--r--  lib/livepatch/test_klp_callbacks_busy.c  |   8
-rw-r--r--  lib/lru_cache.c                          |   4
-rw-r--r--  lib/lz4/lz4_decompress.c                 |   6
-rw-r--r--  lib/lzo/lzo1x_compress.c                 |   6
-rw-r--r--  lib/mpi/mpiutil.c                        |   2
-rw-r--r--  lib/nodemask.c                           |   8
-rw-r--r--  lib/radix-tree.c                         |   4
-rw-r--r--  lib/scatterlist.c                        |   4
-rw-r--r--  lib/smp_processor_id.c                   |   2
-rw-r--r--  lib/stackdepot.c                         |  59
-rw-r--r--  lib/test_bitmap.c                        |  68
-rw-r--r--  lib/test_cpumask.c                       | 138
-rw-r--r--  lib/test_free_pages.c                    |   2
-rw-r--r--  lib/test_hmm.c                           | 347
-rw-r--r--  lib/test_hmm_uapi.h                      |  19
-rw-r--r--  lib/test_printf.c                        |  21
-rw-r--r--  lib/test_vmalloc.c                       |  15
-rw-r--r--  lib/trace_readwrite.c                    |  47
-rw-r--r--  lib/ts_bm.c                              |   2
29 files changed, 716 insertions, 292 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index eaaad4d85bf2..dc1ab2ed1dc6 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -118,6 +118,13 @@ config INDIRECT_IOMEM_FALLBACK
 	  mmio accesses when the IO memory address is not a registered
 	  emulated region.
 
+config TRACE_MMIO_ACCESS
+	bool "Register read/write tracing"
+	depends on TRACING && ARCH_HAVE_TRACE_MMIO_ACCESS
+	help
+	  Create tracepoints for MMIO read/write operations. These trace events
+	  can be used for logging all MMIO read/write operations.
+
 source "lib/crypto/Kconfig"
 
 config LIB_MEMNEQ
@@ -685,15 +692,6 @@ config STACKDEPOT_ALWAYS_INIT
 	bool
 	select STACKDEPOT
 
-config STACK_HASH_ORDER
-	int "stack depot hash size (12 => 4KB, 20 => 1024KB)"
-	range 12 20
-	default 20
-	depends on STACKDEPOT
-	help
-	  Select the hash size as a power of 2 for the stackdepot hash table.
-	  Choose a lower value to reduce the memory impact.
-
 config REF_TRACKER
 	bool
 	depends on STACKTRACE_SUPPORT
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 35cd8287642a..072e4b289c13 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -699,6 +699,14 @@ config DEBUG_OBJECTS_ENABLE_DEFAULT
 	help
 	  Debug objects boot parameter default value
 
+config SHRINKER_DEBUG
+	bool "Enable shrinker debugging support"
+	depends on DEBUG_FS
+	help
+	  Say Y to enable the shrinker debugfs interface which provides
+	  visibility into the kernel memory shrinkers subsystem.
+	  Disable it to avoid an extra memory footprint.
+
 config HAVE_DEBUG_KMEMLEAK
 	bool
 
@@ -2021,6 +2029,15 @@ config LKDTM
 	  Documentation on how to use the module can be found in
 	  Documentation/fault-injection/provoke-crashes.rst
 
+config TEST_CPUMASK
+	tristate "cpumask tests" if !KUNIT_ALL_TESTS
+	depends on KUNIT
+	default KUNIT_ALL_TESTS
+	help
+	  Enable to turn on cpumask tests, running at boot or module load time.
+
+	  If unsure, say N.
+
 config TEST_LIST_SORT
 	tristate "Linked list sorting test" if !KUNIT_ALL_TESTS
 	depends on KUNIT
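Note: TEST_CPUMASK above hooks the new cpumask tests into KUnit. A minimal sketch of the kind of check such a suite can run follows; the case and suite names are illustrative, not the actual contents of lib/test_cpumask.c.

#include <kunit/test.h>
#include <linux/cpumask.h>

/* Illustrative check: every CPU visited by for_each_cpu() must test as
 * set, and the visit count must equal the mask's weight. */
static void cpumask_iteration_matches_weight(struct kunit *test)
{
	unsigned int cpu, visited = 0;

	for_each_cpu(cpu, cpu_possible_mask) {
		KUNIT_EXPECT_TRUE(test, cpumask_test_cpu(cpu, cpu_possible_mask));
		visited++;
	}
	KUNIT_EXPECT_EQ(test, visited, cpumask_weight(cpu_possible_mask));
}

static struct kunit_case cpumask_example_cases[] = {
	KUNIT_CASE(cpumask_iteration_matches_weight),
	{}
};

static struct kunit_suite cpumask_example_suite = {
	.name = "cpumask-example",
	.test_cases = cpumask_example_cases,
};
kunit_test_suite(cpumask_example_suite);

MODULE_LICENSE("GPL");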
diff --git a/lib/Makefile b/lib/Makefile
index 441795bf5d33..c95212141928 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -33,11 +33,10 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
 	 flex_proportions.o ratelimit.o show_mem.o \
 	 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
 	 earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
-	 nmi_backtrace.o nodemask.o win_minmax.o memcat_p.o \
-	 buildid.o
+	 nmi_backtrace.o win_minmax.o memcat_p.o \
+	 buildid.o cpumask.o
 
 lib-$(CONFIG_PRINTK) += dump_stack.o
-lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y	+= kobject.o klist.o
 obj-y	+= lockref.o
@@ -100,6 +99,7 @@ obj-$(CONFIG_TEST_HMM) += test_hmm.o
 obj-$(CONFIG_TEST_FREE_PAGES) += test_free_pages.o
 obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o
 obj-$(CONFIG_TEST_REF_TRACKER) += test_ref_tracker.o
+obj-$(CONFIG_TEST_CPUMASK) += test_cpumask.o
 CFLAGS_test_fprobe.o += $(CC_FLAGS_FTRACE)
 obj-$(CONFIG_FPROBE_SANITY_TEST) += test_fprobe.o
 #
@@ -151,6 +151,8 @@ lib-y += logic_pio.o
 
 lib-$(CONFIG_INDIRECT_IOMEM) += logic_iomem.o
 
+obj-$(CONFIG_TRACE_MMIO_ACCESS) += trace_readwrite.o
+
 obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
 
 obj-$(CONFIG_BTREE) += btree.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b18e31ea6e66..488e6c3e5acc 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -237,7 +237,7 @@ void bitmap_cut(unsigned long *dst, const unsigned long *src,
 }
 EXPORT_SYMBOL(bitmap_cut);
 
-int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
 				const unsigned long *bitmap2, unsigned int bits)
 {
 	unsigned int k;
@@ -275,7 +275,7 @@ void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_xor);
 
-int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
+bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
 				const unsigned long *bitmap2, unsigned int bits)
 {
 	unsigned int k;
@@ -333,10 +333,9 @@ bool __bitmap_subset(const unsigned long *bitmap1,
 }
 EXPORT_SYMBOL(__bitmap_subset);
 
-int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
+unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int bits)
 {
-	unsigned int k, lim = bits/BITS_PER_LONG;
-	int w = 0;
+	unsigned int k, lim = bits/BITS_PER_LONG, w = 0;
 
 	for (k = 0; k < lim; k++)
 		w += hweight_long(bitmap[k]);
@@ -1564,7 +1563,7 @@ void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits)
 
 	/* Clear tail bits in the last element of array beyond nbits. */
 	if (nbits % 64)
-		buf[-1] &= GENMASK_ULL(nbits % 64, 0);
+		buf[-1] &= GENMASK_ULL((nbits - 1) % 64, 0);
 }
 EXPORT_SYMBOL(bitmap_to_arr64);
 #endif
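Note: the final bitmap.c hunk fixes an off-by-one in bitmap_to_arr64(). GENMASK_ULL(h, l) produces a mask with bits l..h set inclusive, so masking the tail word with GENMASK_ULL(nbits % 64, 0) preserved one bit too many whenever nbits is not a multiple of 64 (the multiple-of-64 case never reaches the masking). A standalone userspace demonstration, with the macro re-defined locally for illustration:

#include <stdio.h>

/* Same shape as the kernel's GENMASK_ULL(h, l): bits l..h inclusive. */
#define GENMASK_ULL(h, l) \
	(((~0ULL) >> (63 - (h))) & (~0ULL << (l)))

int main(void)
{
	unsigned int nbits = 1;	/* only bit 0 is valid */

	/* Old mask: GENMASK_ULL(1, 0) = 0x3, which wrongly keeps bit 1. */
	printf("old: %#llx\n", GENMASK_ULL(nbits % 64, 0));
	/* New mask: GENMASK_ULL(0, 0) = 0x1, exactly bits 0..nbits-1. */
	printf("new: %#llx\n", GENMASK_ULL((nbits - 1) % 64, 0));
	return 0;
}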
diff --git a/lib/btree.c b/lib/btree.c
index b4cf08a5c267..a82100c73b55 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -238,7 +238,7 @@ static int keyzero(struct btree_geo *geo, unsigned long *key)
 	return 1;
 }
 
-void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+static void *btree_lookup_node(struct btree_head *head, struct btree_geo *geo,
 		unsigned long *key)
 {
 	int i, height = head->height;
@@ -257,7 +257,16 @@ void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
 		if (!node)
 			return NULL;
 	}
+	return node;
+}
 
+void *btree_lookup(struct btree_head *head, struct btree_geo *geo,
+		unsigned long *key)
+{
+	int i;
+	unsigned long *node;
+
+	node = btree_lookup_node(head, geo, key);
 	if (!node)
 		return NULL;
 
@@ -271,23 +280,10 @@ EXPORT_SYMBOL_GPL(btree_lookup);
 int btree_update(struct btree_head *head, struct btree_geo *geo,
 		unsigned long *key, void *val)
 {
-	int i, height = head->height;
-	unsigned long *node = head->node;
-
-	if (height == 0)
-		return -ENOENT;
-
-	for ( ; height > 1; height--) {
-		for (i = 0; i < geo->no_pairs; i++)
-			if (keycmp(geo, node, i, key) <= 0)
-				break;
-		if (i == geo->no_pairs)
-			return -ENOENT;
-		node = bval(geo, node, i);
-		if (!node)
-			return -ENOENT;
-	}
+	int i;
+	unsigned long *node;
 
+	node = btree_lookup_node(head, geo, key);
 	if (!node)
 		return -ENOENT;
 
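Note: the btree.c refactor above only deduplicates the tree descent shared by btree_lookup() and btree_update(); callers are unaffected. A rough sketch of typical use of this API, assuming the btree_geo64 geometry from <linux/btree.h> on a 64-bit kernel (the function itself is hypothetical):

#include <linux/btree.h>
#include <linux/gfp.h>

/* Hypothetical: map a 64-bit key to a value, then update it in place. */
static int btree_example(void)
{
	struct btree_head head;
	unsigned long key = 42;
	int ret;

	ret = btree_init(&head);
	if (ret)
		return ret;

	ret = btree_insert(&head, &btree_geo64, &key, (void *)"old", GFP_KERNEL);
	if (ret)
		goto out;

	/* Walks the same descent now factored into btree_lookup_node(). */
	ret = btree_update(&head, &btree_geo64, &key, (void *)"new");
out:
	btree_destroy(&head);
	return ret;
}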
diff --git a/lib/cpumask.c b/lib/cpumask.c
index a971a82d2f43..8baeb37e23d3 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -8,61 +8,6 @@
 #include <linux/numa.h>
 
 /**
- * cpumask_next - get the next cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
- * @srcp: the cpumask pointer
- *
- * Returns >= nr_cpu_ids if no further cpus set.
- */
-unsigned int cpumask_next(int n, const struct cpumask *srcp)
-{
-	/* -1 is a legal arg here. */
-	if (n != -1)
-		cpumask_check(n);
-	return find_next_bit(cpumask_bits(srcp), nr_cpumask_bits, n + 1);
-}
-EXPORT_SYMBOL(cpumask_next);
-
-/**
- * cpumask_next_and - get the next cpu in *src1p & *src2p
- * @n: the cpu prior to the place to search (ie. return will be > @n)
- * @src1p: the first cpumask pointer
- * @src2p: the second cpumask pointer
- *
- * Returns >= nr_cpu_ids if no further cpus set in both.
- */
-int cpumask_next_and(int n, const struct cpumask *src1p,
-		     const struct cpumask *src2p)
-{
-	/* -1 is a legal arg here. */
-	if (n != -1)
-		cpumask_check(n);
-	return find_next_and_bit(cpumask_bits(src1p), cpumask_bits(src2p),
-		nr_cpumask_bits, n + 1);
-}
-EXPORT_SYMBOL(cpumask_next_and);
-
-/**
- * cpumask_any_but - return a "random" in a cpumask, but not this one.
- * @mask: the cpumask to search
- * @cpu: the cpu to ignore.
- *
- * Often used to find any cpu but smp_processor_id() in a mask.
- * Returns >= nr_cpu_ids if no cpus set.
- */
-int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
-{
-	unsigned int i;
-
-	cpumask_check(cpu);
-	for_each_cpu(i, mask)
-		if (i != cpu)
-			break;
-	return i;
-}
-EXPORT_SYMBOL(cpumask_any_but);
-
-/**
+/**
  * cpumask_next_wrap - helper to implement for_each_cpu_wrap
  * @n: the cpu prior to the place to search
  * @mask: the cpumask pointer
@@ -74,9 +19,9 @@ EXPORT_SYMBOL(cpumask_any_but);
  * Note: the @wrap argument is required for the start condition when
  * we cannot assume @start is set in @mask.
  */
-int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
 {
-	int next;
+	unsigned int next;
 
 again:
 	next = cpumask_next(n, mask);
@@ -125,34 +70,6 @@ bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
 }
 EXPORT_SYMBOL(alloc_cpumask_var_node);
 
-bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
-{
-	return alloc_cpumask_var_node(mask, flags | __GFP_ZERO, node);
-}
-EXPORT_SYMBOL(zalloc_cpumask_var_node);
-
-/**
- * alloc_cpumask_var - allocate a struct cpumask
- * @mask: pointer to cpumask_var_t where the cpumask is returned
- * @flags: GFP_ flags
- *
- * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise is
- * a nop returning a constant 1 (in <linux/cpumask.h>).
- *
- * See alloc_cpumask_var_node.
- */
-bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
-{
-	return alloc_cpumask_var_node(mask, flags, NUMA_NO_NODE);
-}
-EXPORT_SYMBOL(alloc_cpumask_var);
-
-bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
-{
-	return alloc_cpumask_var(mask, flags | __GFP_ZERO);
-}
-EXPORT_SYMBOL(zalloc_cpumask_var);
-
 /**
  * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
  * @mask: pointer to cpumask_var_t where the cpumask is returned
@@ -192,6 +109,7 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 }
 #endif
 
+#if NR_CPUS > 1
 /**
  * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
@@ -205,7 +123,7 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
  */
 unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-	int cpu;
+	unsigned int cpu;
 
 	/* Wrap: we always want a cpu. */
 	i %= num_online_cpus();
@@ -243,10 +161,10 @@ static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);
 *
 * Returns >= nr_cpu_ids if the intersection is empty.
 */
-int cpumask_any_and_distribute(const struct cpumask *src1p,
+unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
 		const struct cpumask *src2p)
 {
-	int next, prev;
+	unsigned int next, prev;
 
 	/* NOTE: our first selection will skip 0. */
 	prev = __this_cpu_read(distribute_cpu_mask_prev);
@@ -262,9 +180,9 @@ int cpumask_any_and_distribute(const struct cpumask *src1p,
 }
 EXPORT_SYMBOL(cpumask_any_and_distribute);
 
-int cpumask_any_distribute(const struct cpumask *srcp)
+unsigned int cpumask_any_distribute(const struct cpumask *srcp)
 {
-	int next, prev;
+	unsigned int next, prev;
 
 	/* NOTE: our first selection will skip 0. */
 	prev = __this_cpu_read(distribute_cpu_mask_prev);
@@ -279,3 +197,4 @@ int cpumask_any_distribute(const struct cpumask *srcp)
 	return next;
 }
 EXPORT_SYMBOL(cpumask_any_distribute);
+#endif /* NR_CPUS */
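Note: the cpumask.c hunks above change several return types from int to unsigned int; the exhaustion convention is unchanged, namely a return value >= nr_cpu_ids. A short sketch of open-coded iteration against that sentinel (essentially what for_each_cpu() expands to; the function name is illustrative):

#include <linux/cpumask.h>
#include <linux/printk.h>

/* Walk the online mask by hand; cpumask_next() returns a value
 * >= nr_cpu_ids once the mask is exhausted. */
static void print_online_cpus(void)
{
	unsigned int cpu;

	for (cpu = cpumask_first(cpu_online_mask);
	     cpu < nr_cpu_ids;
	     cpu = cpumask_next(cpu, cpu_online_mask))
		pr_info("cpu%u is online\n", cpu);
}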
diff --git a/lib/devres.c b/lib/devres.c
index 14664bbb4875..55eb07e80cbb 100644
--- a/lib/devres.c
+++ b/lib/devres.c
@@ -29,7 +29,8 @@ static void __iomem *__devm_ioremap(struct device *dev, resource_size_t offset,
 {
 	void __iomem **ptr, *addr = NULL;
 
-	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+	ptr = devres_alloc_node(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL,
+				dev_to_node(dev));
 	if (!ptr)
 		return NULL;
 
@@ -292,7 +293,8 @@ void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
 {
 	void __iomem **ptr, *addr;
 
-	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+	ptr = devres_alloc_node(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL,
+				dev_to_node(dev));
 	if (!ptr)
 		return NULL;
 
@@ -366,7 +368,8 @@ void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
 	if (dr)
 		return dr->table;
 
-	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+	new_dr = devres_alloc_node(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL,
+				   dev_to_node(&pdev->dev));
 	if (!new_dr)
 		return NULL;
 	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
@@ -548,7 +551,8 @@ int devm_arch_phys_wc_add(struct device *dev, unsigned long base, unsigned long
 	int *mtrr;
 	int ret;
 
-	mtrr = devres_alloc(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL);
+	mtrr = devres_alloc_node(devm_arch_phys_ac_add_release, sizeof(*mtrr), GFP_KERNEL,
+				 dev_to_node(dev));
 	if (!mtrr)
 		return -ENOMEM;
 
@@ -593,7 +597,8 @@ int devm_arch_io_reserve_memtype_wc(struct device *dev, resource_size_t start,
 	struct arch_io_reserve_memtype_wc_devres *dr;
 	int ret;
 
-	dr = devres_alloc(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL);
+	dr = devres_alloc_node(devm_arch_io_free_memtype_wc_release, sizeof(*dr), GFP_KERNEL,
+			       dev_to_node(dev));
 	if (!dr)
 		return -ENOMEM;
 
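Note: all five devres.c hunks make the same substitution: devres_alloc() places the devres record wherever the allocator prefers, while devres_alloc_node() keys the allocation to the device's NUMA node. A sketch of the same pattern in a hypothetical devm-style helper (the foo names are made up):

#include <linux/device.h>

struct foo_state { int id; };	/* hypothetical managed state */

static void foo_release(struct device *dev, void *res)
{
	/* Undo foo's setup here; the devres core frees the record
	 * itself after this callback returns. */
}

static int devm_foo_setup(struct device *dev)
{
	struct foo_state *st;

	/* Keep the bookkeeping on the device's own NUMA node. */
	st = devres_alloc_node(foo_release, sizeof(*st), GFP_KERNEL,
			       dev_to_node(dev));
	if (!st)
		return -ENOMEM;

	st->id = 1;
	devres_add(dev, st);
	return 0;
}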
diff --git a/lib/error-inject.c b/lib/error-inject.c
index 2ff5ef689d72..1afca1b1cdea 100644
--- a/lib/error-inject.c
+++ b/lib/error-inject.c
@@ -40,12 +40,18 @@ bool within_error_injection_list(unsigned long addr)
 int get_injectable_error_type(unsigned long addr)
 {
 	struct ei_entry *ent;
+	int ei_type = EI_ETYPE_NONE;
 
+	mutex_lock(&ei_mutex);
 	list_for_each_entry(ent, &error_injection_list, list) {
-		if (addr >= ent->start_addr && addr < ent->end_addr)
-			return ent->etype;
+		if (addr >= ent->start_addr && addr < ent->end_addr) {
+			ei_type = ent->etype;
+			break;
+		}
 	}
-	return EI_ETYPE_NONE;
+	mutex_unlock(&ei_mutex);
+
+	return ei_type;
 }
 
 /*
@@ -197,24 +203,14 @@ static int ei_seq_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static const struct seq_operations ei_seq_ops = {
+static const struct seq_operations ei_sops = {
 	.start = ei_seq_start,
 	.next  = ei_seq_next,
 	.stop  = ei_seq_stop,
 	.show  = ei_seq_show,
 };
 
-static int ei_open(struct inode *inode, struct file *filp)
-{
-	return seq_open(filp, &ei_seq_ops);
-}
-
-static const struct file_operations debugfs_ei_ops = {
-	.open           = ei_open,
-	.read           = seq_read,
-	.llseek         = seq_lseek,
-	.release        = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(ei);
 
 static int __init ei_debugfs_init(void)
 {
@@ -224,7 +220,7 @@ static int __init ei_debugfs_init(void)
 	if (!dir)
 		return -ENOMEM;
 
-	file = debugfs_create_file("list", 0444, dir, NULL, &debugfs_ei_ops);
+	file = debugfs_create_file("list", 0444, dir, NULL, &ei_fops);
 	if (!file) {
 		debugfs_remove(dir);
 		return -ENOMEM;
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 53e7eb1dd76c..05cccbcf1661 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -63,18 +63,13 @@ void fprop_global_destroy(struct fprop_global *p)
  */
 bool fprop_new_period(struct fprop_global *p, int periods)
 {
-	s64 events;
-	unsigned long flags;
+	s64 events = percpu_counter_sum(&p->events);
 
-	local_irq_save(flags);
-	events = percpu_counter_sum(&p->events);
 	/*
 	 * Don't do anything if there are no events.
 	 */
-	if (events <= 1) {
-		local_irq_restore(flags);
+	if (events <= 1)
 		return false;
-	}
 	write_seqcount_begin(&p->sequence);
 	if (periods < 64)
 		events -= events >> periods;
@@ -82,7 +77,6 @@ bool fprop_new_period(struct fprop_global *p, int periods)
 	percpu_counter_add(&p->events, -events);
 	p->period += periods;
 	write_seqcount_end(&p->sequence);
-	local_irq_restore(flags);
 
 	return true;
 }
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 9daa3fb9d1cd..d98d43f80958 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -20,7 +20,11 @@
 bool __list_add_valid(struct list_head *new, struct list_head *prev,
 		      struct list_head *next)
 {
-	if (CHECK_DATA_CORRUPTION(next->prev != prev,
+	if (CHECK_DATA_CORRUPTION(prev == NULL,
+			"list_add corruption. prev is NULL.\n") ||
+	    CHECK_DATA_CORRUPTION(next == NULL,
+			"list_add corruption. next is NULL.\n") ||
+	    CHECK_DATA_CORRUPTION(next->prev != prev,
 			"list_add corruption. next->prev should be prev (%px), but was %px. (next=%px).\n",
 			prev, next->prev, next) ||
 	    CHECK_DATA_CORRUPTION(prev->next != next,
@@ -42,7 +46,11 @@ bool __list_del_entry_valid(struct list_head *entry)
 	prev = entry->prev;
 	next = entry->next;
 
-	if (CHECK_DATA_CORRUPTION(next == LIST_POISON1,
+	if (CHECK_DATA_CORRUPTION(next == NULL,
+			"list_del corruption, %px->next is NULL\n", entry) ||
+	    CHECK_DATA_CORRUPTION(prev == NULL,
+			"list_del corruption, %px->prev is NULL\n", entry) ||
+	    CHECK_DATA_CORRUPTION(next == LIST_POISON1,
 			"list_del corruption, %px->next is LIST_POISON1 (%px)\n",
 			entry, LIST_POISON1) ||
 	    CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
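Note: the list_debug.c hunks above teach CONFIG_DEBUG_LIST to recognise NULL next/prev pointers, which typically come from a zeroed (for example, freed and cleared) list_head rather than from the LIST_POISON values. A contrived sketch of a bug the new checks report instead of oopsing on a NULL dereference:

#include <linux/list.h>
#include <linux/string.h>

static LIST_HEAD(items);

struct item { struct list_head node; };

static void corruption_example(struct item *it)
{
	list_add(&it->node, &items);

	/* Buggy teardown: wipes the object while it is still linked. */
	memset(it, 0, sizeof(*it));

	/* With CONFIG_DEBUG_LIST, __list_del_entry_valid() now reports
	 * "%px->next is NULL" here rather than dereferencing NULL. */
	list_del(&it->node);
}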
diff --git a/lib/livepatch/test_klp_callbacks_busy.c b/lib/livepatch/test_klp_callbacks_busy.c
index 7ac845f65be5..133929e0ce8f 100644
--- a/lib/livepatch/test_klp_callbacks_busy.c
+++ b/lib/livepatch/test_klp_callbacks_busy.c
@@ -16,10 +16,12 @@ MODULE_PARM_DESC(block_transition, "block_transition (default=false)");
 
 static void busymod_work_func(struct work_struct *work);
 static DECLARE_WORK(work, busymod_work_func);
+static DECLARE_COMPLETION(busymod_work_started);
 
 static void busymod_work_func(struct work_struct *work)
 {
 	pr_info("%s enter\n", __func__);
+	complete(&busymod_work_started);
 
 	while (READ_ONCE(block_transition)) {
 		/*
@@ -37,6 +39,12 @@ static int test_klp_callbacks_busy_init(void)
 	pr_info("%s\n", __func__);
 	schedule_work(&work);
 
+	/*
+	 * To synchronize kernel messages, hold the init function from
+	 * exiting until the work function's entry message has printed.
+	 */
+	wait_for_completion(&busymod_work_started);
+
 	if (!block_transition) {
 		/*
 		 * Serialize output: print all messages from the work
diff --git a/lib/lru_cache.c b/lib/lru_cache.c
index 52313acbfa62..dc35464216d3 100644
--- a/lib/lru_cache.c
+++ b/lib/lru_cache.c
@@ -147,8 +147,8 @@ struct lru_cache *lc_create(const char *name, struct kmem_cache *cache,
 	return lc;
 
 	/* else: could not allocate all elements, give up */
-	for (i--; i; i--) {
-		void *p = element[i];
+	while (i) {
+		void *p = element[--i];
 		kmem_cache_free(cache, p - e_off);
 	}
 	kfree(lc);
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index fd1728d94bab..59fe69a63800 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -507,9 +507,9 @@ static int LZ4_decompress_safe_withSmallPrefix(const char *source, char *dest,
 				      (BYTE *)dest - prefixSize, NULL, 0);
 }
 
-int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
-				     int compressedSize, int maxOutputSize,
-				     const void *dictStart, size_t dictSize)
+static int LZ4_decompress_safe_forceExtDict(const char *source, char *dest,
+					    int compressedSize, int maxOutputSize,
+					    const void *dictStart, size_t dictSize)
 {
 	return LZ4_decompress_generic(source, dest,
 				      compressedSize, maxOutputSize,
diff --git a/lib/lzo/lzo1x_compress.c b/lib/lzo/lzo1x_compress.c
index 76758e9296ba..9d31e7126606 100644
--- a/lib/lzo/lzo1x_compress.c
+++ b/lib/lzo/lzo1x_compress.c
@@ -50,9 +50,7 @@ next:
 
 		if (dv == 0 && bitstream_version) {
 			const unsigned char *ir = ip + 4;
-			const unsigned char *limit = ip_end
-				< (ip + MAX_ZERO_RUN_LENGTH + 1)
-				? ip_end : ip + MAX_ZERO_RUN_LENGTH + 1;
+			const unsigned char *limit = min(ip_end, ip + MAX_ZERO_RUN_LENGTH + 1);
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && \
 	defined(LZO_FAST_64BIT_MEMORY_ACCESS)
 			u64 dv64;
@@ -326,7 +324,7 @@ static int lzogeneric1x_1_compress(const unsigned char *in, size_t in_len,
 	data_start = op;
 
 	while (l > 20) {
-		size_t ll = l <= (m4_max_offset + 1) ? l : (m4_max_offset + 1);
+		size_t ll = min_t(size_t, l, m4_max_offset + 1);
 		uintptr_t ll_end = (uintptr_t) ip + ll;
 		if ((ll_end + ((t + ll) >> 5)) <= ll_end)
 			break;
diff --git a/lib/mpi/mpiutil.c b/lib/mpi/mpiutil.c
index bc81419f400c..aa8c46544af8 100644
--- a/lib/mpi/mpiutil.c
+++ b/lib/mpi/mpiutil.c
@@ -272,7 +272,7 @@ MPI mpi_set_ui(MPI w, unsigned long u)
 	if (!w)
 		w = mpi_alloc(1);
 	/* FIXME: If U is 0 we have no need to resize and thus possible
-	 * allocating the the limbs.
+	 * allocating the limbs.
 	 */
 	RESIZE_IF_NEEDED(w, 1);
 	w->d[0] = u;
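Note: in the lru_cache.c hunk above, the old unwind loop `for (i--; i; i--)` stopped before reaching index 0, so element[0] leaked on the partial-allocation error path; `while (i)` with a pre-decrement frees every slot. The same pattern in isolation (userspace free() stands in for kmem_cache_free()):

#include <stdlib.h>

/* Free the first i elements of a partially built array, the shape
 * of the lc_create() error path. */
static void unwind_partial(void **element, unsigned int i)
{
	/* Old form, for (i--; i; i--) free(element[i]);
	 * frees element[i-1] .. element[1] and leaks element[0]. */
	while (i)
		free(element[--i]);	/* frees element[i-1] .. element[0] */
}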
diff --git a/lib/nodemask.c b/lib/nodemask.c
index e22647f5181b..b8a433d16b51 100644
--- a/lib/nodemask.c
+++ b/lib/nodemask.c
@@ -3,14 +3,6 @@
 #include <linux/module.h>
 #include <linux/random.h>
 
-unsigned int __next_node_in(int node, const nodemask_t *srcp)
-{
-	unsigned int ret = __next_node(node, srcp);
-
-	if (ret == MAX_NUMNODES)
-		ret = __first_node(srcp);
-	return ret;
-}
 EXPORT_SYMBOL(__next_node_in);
 
 #ifdef CONFIG_NUMA
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index b3afafe46fff..3c78e1e8b2ad 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -677,7 +677,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
 }
 
 static inline int insert_entries(struct radix_tree_node *node,
-		void __rcu **slot, void *item, bool replace)
+		void __rcu **slot, void *item)
 {
 	if (*slot)
 		return -EEXIST;
@@ -711,7 +711,7 @@ int radix_tree_insert(struct radix_tree_root *root, unsigned long index,
 	if (error)
 		return error;
 
-	error = insert_entries(node, slot, item, false);
+	error = insert_entries(node, slot, item);
 	if (error < 0)
 		return error;
 
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index d5e82e4a57ad..c8c3d675845c 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -240,7 +240,7 @@ EXPORT_SYMBOL(__sg_free_table);
  **/
 void sg_free_append_table(struct sg_append_table *table)
 {
-	__sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, false, sg_kfree,
+	__sg_free_table(&table->sgt, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
 			table->total_nents);
 }
 EXPORT_SYMBOL(sg_free_append_table);
@@ -253,7 +253,7 @@ EXPORT_SYMBOL(sg_free_append_table);
  **/
 void sg_free_table(struct sg_table *table)
 {
-	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree,
+	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree,
 			table->orig_nents);
 }
 EXPORT_SYMBOL(sg_free_table);
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 046ac6297c78..a2bb7738c373 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -47,9 +47,9 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
 	printk("caller is %pS\n", __builtin_return_address(0));
 	dump_stack();
 
-	instrumentation_end();
 
 out_enable:
+	instrumentation_end();
 	preempt_enable_no_resched_notrace();
 out:
 	return this_cpu;
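Note: the stackdepot.c diff that follows replaces the compile-time CONFIG_STACK_HASH_ORDER with boot-time sizing through alloc_large_system_hash(): roughly one hash bucket per 2^14 bytes (16 KiB) of memory, clamped to between 2^12 and 2^20 buckets. A simplified worked example of that arithmetic, assuming 4 KiB pages and ignoring the power-of-two round-up the kernel applies:

/* scale = 14, PAGE_SHIFT = 12, so entries = pages >> (14 - 12).
 *
 *   4 GiB of pages -> 2^20 pages -> 2^18 buckets (inside the range)
 *  16 MiB          -> 2^12 pages -> 2^10 -> raised to the 2^12 floor
 *  64 GiB          -> 2^24 pages -> 2^22 -> clamped to the 2^20 cap
 */
static unsigned long stackdepot_buckets(unsigned long pages)
{
	unsigned long entries = pages >> 2;

	if (entries < 1UL << 12)
		entries = 1UL << 12;
	if (entries > 1UL << 20)
		entries = 1UL << 20;
	return entries;
}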
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 5ca0d086ef4a..e73fda23388d 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -32,6 +32,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/memblock.h>
+#include <linux/kasan-enabled.h>
 
 #define DEPOT_STACK_BITS (sizeof(depot_stack_handle_t) * 8)
 
@@ -145,10 +146,16 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 	return stack;
 }
 
-#define STACK_HASH_SIZE (1L << CONFIG_STACK_HASH_ORDER)
-#define STACK_HASH_MASK (STACK_HASH_SIZE - 1)
+/* one hash table bucket entry per 16kB of memory */
+#define STACK_HASH_SCALE	14
+/* limited between 4k and 1M buckets */
+#define STACK_HASH_ORDER_MIN	12
+#define STACK_HASH_ORDER_MAX	20
 #define STACK_HASH_SEED 0x9747b28c
 
+static unsigned int stack_hash_order;
+static unsigned int stack_hash_mask;
+
 static bool stack_depot_disable;
 static struct stack_record **stack_table;
 
@@ -175,7 +182,7 @@ void __init stack_depot_want_early_init(void)
 
 int __init stack_depot_early_init(void)
 {
-	size_t size;
+	unsigned long entries = 0;
 
 	/* This is supposed to be called only once, from mm_init() */
 	if (WARN_ON(__stack_depot_early_init_passed))
@@ -183,13 +190,23 @@ int __init stack_depot_early_init(void)
 	__stack_depot_early_init_passed = true;
 
+	if (kasan_enabled() && !stack_hash_order)
+		stack_hash_order = STACK_HASH_ORDER_MAX;
+
 	if (!__stack_depot_want_early_init || stack_depot_disable)
 		return 0;
 
-	size = (STACK_HASH_SIZE * sizeof(struct stack_record *));
-	pr_info("Stack Depot early init allocating hash table with memblock_alloc, %zu bytes\n",
-		size);
-	stack_table = memblock_alloc(size, SMP_CACHE_BYTES);
+	if (stack_hash_order)
+		entries = 1UL << stack_hash_order;
+	stack_table = alloc_large_system_hash("stackdepot",
+						sizeof(struct stack_record *),
+						entries,
+						STACK_HASH_SCALE,
+						HASH_EARLY | HASH_ZERO,
+						NULL,
+						&stack_hash_mask,
+						1UL << STACK_HASH_ORDER_MIN,
+						1UL << STACK_HASH_ORDER_MAX);
 
 	if (!stack_table) {
 		pr_err("Stack Depot hash table allocation failed, disabling\n");
@@ -207,13 +224,35 @@ int stack_depot_init(void)
 	mutex_lock(&stack_depot_init_mutex);
 	if (!stack_depot_disable && !stack_table) {
-		pr_info("Stack Depot allocating hash table with kvcalloc\n");
-		stack_table = kvcalloc(STACK_HASH_SIZE, sizeof(struct stack_record *), GFP_KERNEL);
+		unsigned long entries;
+		int scale = STACK_HASH_SCALE;
+
+		if (stack_hash_order) {
+			entries = 1UL << stack_hash_order;
+		} else {
+			entries = nr_free_buffer_pages();
+			entries = roundup_pow_of_two(entries);
+
+			if (scale > PAGE_SHIFT)
+				entries >>= (scale - PAGE_SHIFT);
+			else
+				entries <<= (PAGE_SHIFT - scale);
+		}
+
+		if (entries < 1UL << STACK_HASH_ORDER_MIN)
+			entries = 1UL << STACK_HASH_ORDER_MIN;
+		if (entries > 1UL << STACK_HASH_ORDER_MAX)
+			en
