author		Feng Tang <feng.tang@intel.com>	2024-09-11 14:45:33 +0800
committer	Vlastimil Babka <vbabka@suse.cz>	2024-10-29 10:43:23 +0100
commit		1e4df1859ec2d09fdfe184e7a92a476f01f64e34 (patch)
tree		16834662c0eb792de73e6d25a99ca047034a3a6e /mm
parent		fb5eda0dfe2256b468fc4e95207a4df88457274f (diff)
mm/slub: Move krealloc() and related code to slub.c
This is a preparation for the following refactoring of krealloc(): the
refactored code will call internal functions defined in slub.c, so moving
it there allows those calls to be made more efficiently.

Signed-off-by: Feng Tang <feng.tang@intel.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Diffstat (limited to 'mm')
-rw-r--r--	mm/slab_common.c	84
-rw-r--r--	mm/slub.c		84
2 files changed, 84 insertions, 84 deletions
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 552b92dfdac7..f1f90491df03 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1190,90 +1190,6 @@ module_init(slab_proc_init);
#endif /* CONFIG_SLUB_DEBUG */
-static __always_inline __realloc_size(2) void *
-__do_krealloc(const void *p, size_t new_size, gfp_t flags)
-{
-	void *ret;
-	size_t ks;
-
-	/* Check for double-free before calling ksize. */
-	if (likely(!ZERO_OR_NULL_PTR(p))) {
-		if (!kasan_check_byte(p))
-			return NULL;
-		ks = ksize(p);
-	} else
-		ks = 0;
-
-	/* If the object still fits, repoison it precisely. */
-	if (ks >= new_size) {
-		/* Zero out spare memory. */
-		if (want_init_on_alloc(flags)) {
-			kasan_disable_current();
-			memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
-			kasan_enable_current();
-		}
-
-		p = kasan_krealloc((void *)p, new_size, flags);
-		return (void *)p;
-	}
-
-	ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
-	if (ret && p) {
-		/* Disable KASAN checks as the object's redzone is accessed. */
-		kasan_disable_current();
-		memcpy(ret, kasan_reset_tag(p), ks);
-		kasan_enable_current();
-	}
-
-	return ret;
-}
-
-/**
- * krealloc - reallocate memory. The contents will remain unchanged.
- * @p: object to reallocate memory for.
- * @new_size: how many bytes of memory are required.
- * @flags: the type of memory to allocate.
- *
- * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
- * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
- *
- * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
- * initial memory allocation, every subsequent call to this API for the same
- * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
- * __GFP_ZERO is not fully honored by this API.
- *
- * This is the case, since krealloc() only knows about the bucket size of an
- * allocation (but not the exact size it was allocated with) and hence
- * implements the following semantics for shrinking and growing buffers with
- * __GFP_ZERO.
- *
- *         new             bucket
- * 0       size             size
- * |--------|----------------|
- * |  keep  |      zero      |
- *
- * In any case, the contents of the object pointed to are preserved up to the
- * lesser of the new and old sizes.
- *
- * Return: pointer to the allocated memory or %NULL in case of error
- */
-void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
-{
-	void *ret;
-
-	if (unlikely(!new_size)) {
-		kfree(p);
-		return ZERO_SIZE_PTR;
-	}
-
-	ret = __do_krealloc(p, new_size, flags);
-	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
-		kfree(p);
-
-	return ret;
-}
-EXPORT_SYMBOL(krealloc_noprof);
-
/**
* kfree_sensitive - Clear sensitive information in memory before freeing
* @p: object to free memory of
diff --git a/mm/slub.c b/mm/slub.c
index 83579b637578..2ccb016c7f99 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -4711,6 +4711,90 @@ void kfree(const void *object)
}
EXPORT_SYMBOL(kfree);
+static __always_inline __realloc_size(2) void *
+__do_krealloc(const void *p, size_t new_size, gfp_t flags)
+{
+	void *ret;
+	size_t ks;
+
+	/* Check for double-free before calling ksize. */
+	if (likely(!ZERO_OR_NULL_PTR(p))) {
+		if (!kasan_check_byte(p))
+			return NULL;
+		ks = ksize(p);
+	} else
+		ks = 0;
+
+	/* If the object still fits, repoison it precisely. */
+	if (ks >= new_size) {
+		/* Zero out spare memory. */
+		if (want_init_on_alloc(flags)) {
+			kasan_disable_current();
+			memset(kasan_reset_tag(p) + new_size, 0, ks - new_size);
+			kasan_enable_current();
+		}
+
+		p = kasan_krealloc((void *)p, new_size, flags);
+		return (void *)p;
+	}
+
+	ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
+	if (ret && p) {
+		/* Disable KASAN checks as the object's redzone is accessed. */
+		kasan_disable_current();
+		memcpy(ret, kasan_reset_tag(p), ks);
+		kasan_enable_current();
+	}
+
+	return ret;
+}
+
+/**
+ * krealloc - reallocate memory. The contents will remain unchanged.
+ * @p: object to reallocate memory for.
+ * @new_size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
+ * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
+ *
+ * If __GFP_ZERO logic is requested, callers must ensure that, starting with the
+ * initial memory allocation, every subsequent call to this API for the same
+ * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that
+ * __GFP_ZERO is not fully honored by this API.
+ *
+ * This is the case, since krealloc() only knows about the bucket size of an
+ * allocation (but not the exact size it was allocated with) and hence
+ * implements the following semantics for shrinking and growing buffers with
+ * __GFP_ZERO.
+ *
+ *         new             bucket
+ * 0       size             size
+ * |--------|----------------|
+ * |  keep  |      zero      |
+ *
+ * In any case, the contents of the object pointed to are preserved up to the
+ * lesser of the new and old sizes.
+ *
+ * Return: pointer to the allocated memory or %NULL in case of error
+ */
+void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
+{
+	void *ret;
+
+	if (unlikely(!new_size)) {
+		kfree(p);
+		return ZERO_SIZE_PTR;
+	}
+
+	ret = __do_krealloc(p, new_size, flags);
+	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
+		kfree(p);
+
+	return ret;
+}
+EXPORT_SYMBOL(krealloc_noprof);
+
struct detached_freelist {
struct slab *slab;
void *tail;
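
For reference, below is a minimal caller-side sketch (not part of this commit) of the contract described in the moved krealloc() kerneldoc: pass __GFP_ZERO consistently starting from the initial allocation, and always adopt the returned pointer, since krealloc() may either resize within the existing kmalloc bucket or hand back a fresh allocation. The helper grow_buffer() and its parameters are hypothetical.

/*
 * Illustrative caller sketch only -- not part of this commit. The helper
 * name grow_buffer() and its parameters are hypothetical.
 */
#include <linux/types.h>
#include <linux/slab.h>

static int grow_buffer(u8 **buf, size_t *len, size_t new_len)
{
	u8 *tmp;

	/*
	 * The allocation behind *buf is assumed to have started life as
	 * kzalloc(); per the krealloc() kerneldoc, every later call must
	 * also pass __GFP_ZERO, or the bytes beyond the old size may not
	 * be zeroed, because krealloc() only knows the bucket size.
	 */
	tmp = krealloc(*buf, new_len, GFP_KERNEL | __GFP_ZERO);
	if (!tmp)
		return -ENOMEM;	/* *buf is untouched and still owned by the caller */

	/*
	 * krealloc() may return the original pointer (resize within the
	 * same kmalloc bucket) or a new allocation with the old one
	 * already freed, so always adopt the returned pointer.
	 */
	*buf = tmp;
	*len = new_len;
	return 0;
}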