author	Ingo Molnar <mingo@kernel.org>	2014-05-07 13:15:46 +0200
committer	Ingo Molnar <mingo@kernel.org>	2014-05-07 13:15:46 +0200
commit	2fe5de9ce7d57498abc14b375cad2fcf8c3ee6cc (patch)
tree	9478e8cf470c1d5bdb2d89b57a7e35919ab95e72 /mm/vmalloc.c
parent	08f8aeb55d7727d644dbbbbfb798fe937d47751d (diff)
parent	2b4cfe64dee0d84506b951d81bf55d9891744d25 (diff)
download	linux-2fe5de9ce7d57498abc14b375cad2fcf8c3ee6cc.tar.gz
	linux-2fe5de9ce7d57498abc14b375cad2fcf8c3ee6cc.tar.bz2
	linux-2fe5de9ce7d57498abc14b375cad2fcf8c3ee6cc.zip
Merge branch 'sched/urgent' into sched/core, to avoid conflicts
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	10
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0fdf96803c5b..bf233b283319 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -27,7 +27,9 @@
#include <linux/pfn.h>
#include <linux/kmemleak.h>
#include <linux/atomic.h>
+#include <linux/compiler.h>
#include <linux/llist.h>
+
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/shmparam.h>
@@ -1083,6 +1085,12 @@ EXPORT_SYMBOL(vm_unmap_ram);
* @node: prefer to allocate data structures on this node
* @prot: memory protection to use. PAGE_KERNEL for regular RAM
*
+ * For fewer than VMAP_MAX_ALLOC pages, this function can be faster than
+ * vmap(). However, mixing long-lived and short-lived objects with
+ * vm_map_ram() can fragment the vmalloc address space (especially on
+ * 32-bit machines) and eventually cause allocation failures. Use this
+ * function only for short-lived objects.
+ *
* Returns: a pointer to the address that has been mapped, or %NULL on failure
*/
void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
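
For context, here is a minimal sketch of the usage pattern the new comment
recommends: allocate a handful of pages, map them with vm_map_ram(), use the
mapping briefly, and release it with vm_unmap_ram(). The helper name
map_scratch and the NR_PAGES constant are illustrative assumptions, not part
of this commit.

	/* Hypothetical helper illustrating short-lived vm_map_ram() usage. */
	#include <linux/gfp.h>
	#include <linux/mm.h>
	#include <linux/vmalloc.h>

	#define NR_PAGES 4	/* well under VMAP_MAX_ALLOC, so the fast path applies */

	static void *map_scratch(struct page **pages)
	{
		int i;

		for (i = 0; i < NR_PAGES; i++) {
			pages[i] = alloc_page(GFP_KERNEL);
			if (!pages[i])
				goto unwind;
		}
		/* NUMA_NO_NODE: no node preference; PAGE_KERNEL: normal cached RW */
		return vm_map_ram(pages, NR_PAGES, NUMA_NO_NODE, PAGE_KERNEL);
	unwind:
		while (i--)
			__free_page(pages[i]);
		return NULL;
	}

	/* ...use the mapping briefly, then release the address space:
	 *	vm_unmap_ram(addr, NR_PAGES);
	 */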
@@ -2181,7 +2189,7 @@ EXPORT_SYMBOL(remap_vmalloc_range);
* Implement a stub for vmalloc_sync_all() if the architecture chose not to
* have one.
*/
-void __attribute__((weak)) vmalloc_sync_all(void)
+void __weak vmalloc_sync_all(void)
{
}
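
The hunk above swaps the open-coded GCC attribute for the kernel's __weak
shorthand, which is why the first hunk adds the <linux/compiler.h> include
that provides it. As a hedged, standalone illustration of the weak-symbol
mechanism (plain userspace C, assuming GCC or Clang; not kernel code):

	/* weak_demo.c -- sketch of weak-symbol semantics. */
	#include <stdio.h>

	/* Weak default: kept only if no strong definition is linked in. */
	void __attribute__((weak)) vmalloc_sync_all(void)
	{
		puts("weak stub: nothing to do");
	}

	int main(void)
	{
		/* If another object file defines a strong vmalloc_sync_all(),
		 * the linker silently picks it over the weak stub above. */
		vmalloc_sync_all();
		return 0;
	}

This is the same trick the kernel uses here: generic code ships a weak no-op
stub, and an architecture that needs real synchronization work provides a
strong definition that the linker prefers.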