Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpi_pad.c                            |   2
-rw-r--r--  drivers/acpi/numa/hmat.c                           | 146
-rw-r--r--  drivers/android/binder_alloc.c                     |  30
-rw-r--r--  drivers/base/cacheinfo.c                           |  51
-rw-r--r--  drivers/dax/kmem.c                                 |  62
-rw-r--r--  drivers/firmware/efi/unaccepted_memory.c           |  20
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c       |  29
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h                    |   2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c                      |   4
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h                      |   4
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_shrinker.c             |  33
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_device.h         |   2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_drv.c            |   6
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem.h            |   2
-rw-r--r--  drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c   |  29
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c                     |  40
-rw-r--r--  drivers/md/bcache/bcache.h                         |   2
-rw-r--r--  drivers/md/bcache/btree.c                          |  27
-rw-r--r--  drivers/md/bcache/sysfs.c                          |   3
-rw-r--r--  drivers/md/dm-bufio.c                              |  28
-rw-r--r--  drivers/md/dm-cache-metadata.c                     |   2
-rw-r--r--  drivers/md/dm-zoned-metadata.c                     |  28
-rw-r--r--  drivers/md/raid5.c                                 |  26
-rw-r--r--  drivers/md/raid5.h                                 |   2
-rw-r--r--  drivers/misc/vmw_balloon.c                         |  38
-rw-r--r--  drivers/virtio/virtio_balloon.c                    |  24
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_backend.c          |  17
27 files changed, 433 insertions, 226 deletions
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 32a2c006908b..bd1ad07f0290 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -101,7 +101,7 @@ static void round_robin_cpu(unsigned int tsk_index)
for_each_cpu(cpu, pad_busy_cpus)
cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
cpumask_andnot(tmp, cpu_online_mask, tmp);
- /* avoid HT sibilings if possible */
+ /* avoid HT siblings if possible */
if (cpumask_empty(tmp))
cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
if (cpumask_empty(tmp)) {
diff --git a/drivers/acpi/numa/hmat.c b/drivers/acpi/numa/hmat.c
index bba268ecd802..9ef5f1bdcfdb 100644
--- a/drivers/acpi/numa/hmat.c
+++ b/drivers/acpi/numa/hmat.c
@@ -24,6 +24,7 @@
#include <linux/node.h>
#include <linux/sysfs.h>
#include <linux/dax.h>
+#include <linux/memory-tiers.h>
static u8 hmat_revision;
static int hmat_disable __initdata;
@@ -582,28 +583,25 @@ static int initiators_to_nodemask(unsigned long *p_nodes)
return 0;
}
-static void hmat_register_target_initiators(struct memory_target *target)
+static void hmat_update_target_attrs(struct memory_target *target,
+ unsigned long *p_nodes, int access)
{
- static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
struct memory_initiator *initiator;
- unsigned int mem_nid, cpu_nid;
+ unsigned int cpu_nid;
struct memory_locality *loc = NULL;
u32 best = 0;
- bool access0done = false;
int i;
- mem_nid = pxm_to_node(target->memory_pxm);
+ bitmap_zero(p_nodes, MAX_NUMNODES);
/*
- * If the Address Range Structure provides a local processor pxm, link
+ * If the Address Range Structure provides a local processor pxm, set
* only that one. Otherwise, find the best performance attributes and
- * register all initiators that match.
+ * collect all initiators that match.
*/
if (target->processor_pxm != PXM_INVAL) {
cpu_nid = pxm_to_node(target->processor_pxm);
- register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
- access0done = true;
- if (node_state(cpu_nid, N_CPU)) {
- register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
+ if (access == 0 || node_state(cpu_nid, N_CPU)) {
+ set_bit(target->processor_pxm, p_nodes);
return;
}
}
@@ -617,47 +615,10 @@ static void hmat_register_target_initiators(struct memory_target *target)
* We'll also use the sorting to prime the candidate nodes with known
* initiators.
*/
- bitmap_zero(p_nodes, MAX_NUMNODES);
list_sort(NULL, &initiators, initiator_cmp);
if (initiators_to_nodemask(p_nodes) < 0)
return;
- if (!access0done) {
- for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
- loc = localities_types[i];
- if (!loc)
- continue;
-
- best = 0;
- list_for_each_entry(initiator, &initiators, node) {
- u32 value;
-
- if (!test_bit(initiator->processor_pxm, p_nodes))
- continue;
-
- value = hmat_initiator_perf(target, initiator,
- loc->hmat_loc);
- if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
- bitmap_clear(p_nodes, 0, initiator->processor_pxm);
- if (value != best)
- clear_bit(initiator->processor_pxm, p_nodes);
- }
- if (best)
- hmat_update_target_access(target, loc->hmat_loc->data_type,
- best, 0);
- }
-
- for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
- cpu_nid = pxm_to_node(i);
- register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
- }
- }
-
- /* Access 1 ignores Generic Initiators */
- bitmap_zero(p_nodes, MAX_NUMNODES);
- if (initiators_to_nodemask(p_nodes) < 0)
- return;
-
for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
loc = localities_types[i];
if (!loc)
@@ -667,7 +628,7 @@ static void hmat_register_target_initiators(struct memory_target *target)
list_for_each_entry(initiator, &initiators, node) {
u32 value;
- if (!initiator->has_cpu) {
+ if (access == 1 && !initiator->has_cpu) {
clear_bit(initiator->processor_pxm, p_nodes);
continue;
}
@@ -681,14 +642,33 @@ static void hmat_register_target_initiators(struct memory_target *target)
clear_bit(initiator->processor_pxm, p_nodes);
}
if (best)
- hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
+ hmat_update_target_access(target, loc->hmat_loc->data_type, best, access);
}
+}
+
+static void __hmat_register_target_initiators(struct memory_target *target,
+ unsigned long *p_nodes,
+ int access)
+{
+ unsigned int mem_nid, cpu_nid;
+ int i;
+
+ mem_nid = pxm_to_node(target->memory_pxm);
+ hmat_update_target_attrs(target, p_nodes, access);
for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
cpu_nid = pxm_to_node(i);
- register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
+ register_memory_node_under_compute_node(mem_nid, cpu_nid, access);
}
}
+static void hmat_register_target_initiators(struct memory_target *target)
+{
+ static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+
+ __hmat_register_target_initiators(target, p_nodes, 0);
+ __hmat_register_target_initiators(target, p_nodes, 1);
+}
+
static void hmat_register_target_cache(struct memory_target *target)
{
unsigned mem_nid = pxm_to_node(target->memory_pxm);
@@ -780,6 +760,61 @@ static int hmat_callback(struct notifier_block *self,
return NOTIFY_OK;
}
+static int hmat_set_default_dram_perf(void)
+{
+ int rc;
+ int nid, pxm;
+ struct memory_target *target;
+ struct node_hmem_attrs *attrs;
+
+ if (!default_dram_type)
+ return -EIO;
+
+ for_each_node_mask(nid, default_dram_type->nodes) {
+ pxm = node_to_pxm(nid);
+ target = find_mem_target(pxm);
+ if (!target)
+ continue;
+ attrs = &target->hmem_attrs[1];
+ rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT");
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int hmat_calculate_adistance(struct notifier_block *self,
+ unsigned long nid, void *data)
+{
+ static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
+ struct memory_target *target;
+ struct node_hmem_attrs *perf;
+ int *adist = data;
+ int pxm;
+
+ pxm = node_to_pxm(nid);
+ target = find_mem_target(pxm);
+ if (!target)
+ return NOTIFY_OK;
+
+ mutex_lock(&target_lock);
+ hmat_update_target_attrs(target, p_nodes, 1);
+ mutex_unlock(&target_lock);
+
+ perf = &target->hmem_attrs[1];
+
+ if (mt_perf_to_adistance(perf, adist))
+ return NOTIFY_OK;
+
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block hmat_adist_nb __meminitdata = {
+ .notifier_call = hmat_calculate_adistance,
+ .priority = 100,
+};
+
static __init void hmat_free_structures(void)
{
struct memory_target *target, *tnext;
@@ -862,8 +897,13 @@ static __init int hmat_init(void)
hmat_register_targets();
/* Keep the table and structures if the notifier may use them */
- if (!hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI))
- return 0;
+ if (hotplug_memory_notifier(hmat_callback, HMAT_CALLBACK_PRI))
+ goto out_put;
+
+ if (!hmat_set_default_dram_perf())
+ register_mt_adistance_algorithm(&hmat_adist_nb);
+
+ return 0;
out_put:
hmat_free_structures();
acpi_put_table(tbl);
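Note: the hmat.c hunks above hook HMAT performance data into the memory-tiering core: hmat_init() records default DRAM performance via mt_set_default_dram_perf() and registers hmat_adist_nb, so a later mt_calc_adistance() call (as the dax/kmem hunks below do) can ask the notifier chain for an abstract distance of a hot-added node. A minimal sketch of such a provider follows; the node check and the constant distance value are illustrative assumptions, not part of this series.

#include <linux/memory-tiers.h>
#include <linux/notifier.h>

/* Hypothetical placeholder: a real provider would check whether it manages this node. */
static bool example_node_is_mine(unsigned long nid)
{
	return false;
}

/*
 * Returning NOTIFY_OK leaves *adist untouched so other providers may run;
 * returning NOTIFY_STOP finalizes the value, mirroring hmat_calculate_adistance() above.
 */
static int example_calc_adistance(struct notifier_block *self,
				  unsigned long nid, void *data)
{
	int *adist = data;

	if (!example_node_is_mine(nid))
		return NOTIFY_OK;

	*adist = MEMTIER_DEFAULT_DAX_ADISTANCE;	/* "slower than DRAM" tier */
	return NOTIFY_STOP;
}

static struct notifier_block example_adist_nb = {
	.notifier_call = example_calc_adistance,
	.priority = 100,
};

/* at init time: register_mt_adistance_algorithm(&example_adist_nb); */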
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index e3db8297095a..138f6d43d13b 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1053,11 +1053,7 @@ binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
NULL, sc->nr_to_scan);
}
-static struct shrinker binder_shrinker = {
- .count_objects = binder_shrink_count,
- .scan_objects = binder_shrink_scan,
- .seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *binder_shrinker;
/**
* binder_alloc_init() - called by binder_open() for per-proc initialization
@@ -1077,19 +1073,29 @@ void binder_alloc_init(struct binder_alloc *alloc)
int binder_alloc_shrinker_init(void)
{
- int ret = list_lru_init(&binder_alloc_lru);
+ int ret;
- if (ret == 0) {
- ret = register_shrinker(&binder_shrinker, "android-binder");
- if (ret)
- list_lru_destroy(&binder_alloc_lru);
+ ret = list_lru_init(&binder_alloc_lru);
+ if (ret)
+ return ret;
+
+ binder_shrinker = shrinker_alloc(0, "android-binder");
+ if (!binder_shrinker) {
+ list_lru_destroy(&binder_alloc_lru);
+ return -ENOMEM;
}
- return ret;
+
+ binder_shrinker->count_objects = binder_shrink_count;
+ binder_shrinker->scan_objects = binder_shrink_scan;
+
+ shrinker_register(binder_shrinker);
+
+ return 0;
}
void binder_alloc_shrinker_exit(void)
{
- unregister_shrinker(&binder_shrinker);
+ shrinker_free(binder_shrinker);
list_lru_destroy(&binder_alloc_lru);
}
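Note: the binder conversion above is the first of several in this series (i915, msm and panfrost below follow the same shape): the embedded struct shrinker becomes a pointer obtained from shrinker_alloc(0, "name") (no shrinker flags), callbacks and private_data are filled in before shrinker_register(), and shrinker_free() replaces unregister_shrinker(). A minimal sketch of that pattern for a hypothetical "foo" driver, assuming the APIs behave exactly as used in these hunks:

#include <linux/shrinker.h>
#include <linux/atomic.h>

struct foo_device {
	struct shrinker *shrinker;		/* was: struct shrinker shrinker; */
	atomic_long_t nr_cached;
};

static unsigned long foo_count(struct shrinker *s, struct shrink_control *sc)
{
	/* ->private_data replaces the old container_of(shrinker, ...) idiom */
	struct foo_device *foo = s->private_data;

	return atomic_long_read(&foo->nr_cached);
}

static unsigned long foo_scan(struct shrinker *s, struct shrink_control *sc)
{
	struct foo_device *foo = s->private_data;

	/* a real driver would walk an LRU here; this sketch only drops the counter */
	atomic_long_sub(sc->nr_to_scan, &foo->nr_cached);
	return sc->nr_to_scan;
}

static int foo_shrinker_init(struct foo_device *foo)
{
	foo->shrinker = shrinker_alloc(0, "foo");
	if (!foo->shrinker)
		return -ENOMEM;

	foo->shrinker->count_objects = foo_count;
	foo->shrinker->scan_objects = foo_scan;
	foo->shrinker->private_data = foo;

	shrinker_register(foo->shrinker);
	return 0;
}

static void foo_shrinker_exit(struct foo_device *foo)
{
	shrinker_free(foo->shrinker);		/* unregister and free in one call */
}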
diff --git a/drivers/base/cacheinfo.c b/drivers/base/cacheinfo.c
index cbae8be1fe52..f1e79263fe61 100644
--- a/drivers/base/cacheinfo.c
+++ b/drivers/base/cacheinfo.c
@@ -898,6 +898,48 @@ err:
return rc;
}
+/*
+ * Calculate the size of the per-CPU data cache slice. This can be
+ * used to estimate the size of the data cache slice that can be used
+ * by one CPU under ideal circumstances. UNIFIED caches are counted
+ * in addition to DATA caches. So, please consider code cache usage
+ * when using the result.
+ *
+ * Because the cache inclusive/non-inclusive information isn't
+ * available, we just use the size of the per-CPU slice of LLC to make
+ * the result more predictable across architectures.
+ */
+static void update_per_cpu_data_slice_size_cpu(unsigned int cpu)
+{
+ struct cpu_cacheinfo *ci;
+ struct cacheinfo *llc;
+ unsigned int nr_shared;
+
+ if (!last_level_cache_is_valid(cpu))
+ return;
+
+ ci = ci_cacheinfo(cpu);
+ llc = per_cpu_cacheinfo_idx(cpu, cache_leaves(cpu) - 1);
+
+ if (llc->type != CACHE_TYPE_DATA && llc->type != CACHE_TYPE_UNIFIED)
+ return;
+
+ nr_shared = cpumask_weight(&llc->shared_cpu_map);
+ if (nr_shared)
+ ci->per_cpu_data_slice_size = llc->size / nr_shared;
+}
+
+static void update_per_cpu_data_slice_size(bool cpu_online, unsigned int cpu)
+{
+ unsigned int icpu;
+
+ for_each_online_cpu(icpu) {
+ if (!cpu_online && icpu == cpu)
+ continue;
+ update_per_cpu_data_slice_size_cpu(icpu);
+ }
+}
+
static int cacheinfo_cpu_online(unsigned int cpu)
{
int rc = detect_cache_attributes(cpu);
@@ -906,7 +948,12 @@ static int cacheinfo_cpu_online(unsigned int cpu)
return rc;
rc = cache_add_dev(cpu);
if (rc)
- free_cache_attributes(cpu);
+ goto err;
+ update_per_cpu_data_slice_size(true, cpu);
+ setup_pcp_cacheinfo();
+ return 0;
+err:
+ free_cache_attributes(cpu);
return rc;
}
@@ -916,6 +963,8 @@ static int cacheinfo_cpu_pre_down(unsigned int cpu)
cpu_cache_sysfs_exit(cpu);
free_cache_attributes(cpu);
+ update_per_cpu_data_slice_size(false, cpu);
+ setup_pcp_cacheinfo();
return 0;
}
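Note: the cacheinfo hunks above derive the per-CPU data cache slice by dividing the last-level DATA or UNIFIED cache size by the number of CPUs in its shared_cpu_map. A worked example with hypothetical numbers: a unified 32768 KiB LLC shared by 16 CPUs gives per_cpu_data_slice_size = 32768 KiB / 16 = 2048 KiB per CPU. update_per_cpu_data_slice_size() refreshes this on every CPU online/offline event so that the subsequent setup_pcp_cacheinfo() call sees the updated value.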
diff --git a/drivers/dax/kmem.c b/drivers/dax/kmem.c
index c57acb73e3db..369c698b7706 100644
--- a/drivers/dax/kmem.c
+++ b/drivers/dax/kmem.c
@@ -49,14 +49,52 @@ struct dax_kmem_data {
struct resource *res[];
};
-static struct memory_dev_type *dax_slowmem_type;
+static DEFINE_MUTEX(kmem_memory_type_lock);
+static LIST_HEAD(kmem_memory_types);
+
+static struct memory_dev_type *kmem_find_alloc_memory_type(int adist)
+{
+ bool found = false;
+ struct memory_dev_type *mtype;
+
+ mutex_lock(&kmem_memory_type_lock);
+ list_for_each_entry(mtype, &kmem_memory_types, list) {
+ if (mtype->adistance == adist) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) {
+ mtype = alloc_memory_type(adist);
+ if (!IS_ERR(mtype))
+ list_add(&mtype->list, &kmem_memory_types);
+ }
+ mutex_unlock(&kmem_memory_type_lock);
+
+ return mtype;
+}
+
+static void kmem_put_memory_types(void)
+{
+ struct memory_dev_type *mtype, *mtn;
+
+ mutex_lock(&kmem_memory_type_lock);
+ list_for_each_entry_safe(mtype, mtn, &kmem_memory_types, list) {
+ list_del(&mtype->list);
+ put_memory_type(mtype);
+ }
+ mutex_unlock(&kmem_memory_type_lock);
+}
+
static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
{
struct device *dev = &dev_dax->dev;
unsigned long total_len = 0;
struct dax_kmem_data *data;
+ struct memory_dev_type *mtype;
int i, rc, mapped = 0;
int numa_node;
+ int adist = MEMTIER_DEFAULT_DAX_ADISTANCE;
/*
* Ensure good NUMA information for the persistent memory.
@@ -71,6 +109,11 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return -EINVAL;
}
+ mt_calc_adistance(numa_node, &adist);
+ mtype = kmem_find_alloc_memory_type(adist);
+ if (IS_ERR(mtype))
+ return PTR_ERR(mtype);
+
for (i = 0; i < dev_dax->nr_range; i++) {
struct range range;
@@ -88,7 +131,7 @@ static int dev_dax_kmem_probe(struct dev_dax *dev_dax)
return -EINVAL;
}
- init_node_memory_type(numa_node, dax_slowmem_type);
+ init_node_memory_type(numa_node, mtype);
rc = -ENOMEM;
data = kzalloc(struct_size(data, res, dev_dax->nr_range), GFP_KERNEL);
@@ -167,7 +210,7 @@ err_reg_mgid:
err_res_name:
kfree(data);
err_dax_kmem_data:
- clear_node_memory_type(numa_node, dax_slowmem_type);
+ clear_node_memory_type(numa_node, mtype);
return rc;
}
@@ -219,7 +262,7 @@ static void dev_dax_kmem_remove(struct dev_dax *dev_dax)
* for that. This implies this reference will be around
* till next reboot.
*/
- clear_node_memory_type(node, dax_slowmem_type);
+ clear_node_memory_type(node, NULL);
}
}
#else
@@ -251,12 +294,6 @@ static int __init dax_kmem_init(void)
if (!kmem_name)
return -ENOMEM;
- dax_slowmem_type = alloc_memory_type(MEMTIER_DEFAULT_DAX_ADISTANCE);
- if (IS_ERR(dax_slowmem_type)) {
- rc = PTR_ERR(dax_slowmem_type);
- goto err_dax_slowmem_type;
- }
-
rc = dax_driver_register(&device_dax_kmem_driver);
if (rc)
goto error_dax_driver;
@@ -264,8 +301,7 @@ static int __init dax_kmem_init(void)
return rc;
error_dax_driver:
- put_memory_type(dax_slowmem_type);
-err_dax_slowmem_type:
+ kmem_put_memory_types();
kfree_const(kmem_name);
return rc;
}
@@ -275,7 +311,7 @@ static void __exit dax_kmem_exit(void)
dax_driver_unregister(&device_dax_kmem_driver);
if (!any_hotremove_failed)
kfree_const(kmem_name);
- put_memory_type(dax_slowmem_type);
+ kmem_put_memory_types();
}
MODULE_AUTHOR("Intel Corporation");
diff --git a/drivers/firmware/efi/unaccepted_memory.c b/drivers/firmware/efi/unaccepted_memory.c
index 135278ddaf62..3f2f7bf6e335 100644
--- a/drivers/firmware/efi/unaccepted_memory.c
+++ b/drivers/firmware/efi/unaccepted_memory.c
@@ -3,6 +3,7 @@
#include <linux/efi.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
+#include <linux/crash_dump.h>
#include <asm/unaccepted_memory.h>
/* Protects unaccepted memory bitmap and accepting_list */
@@ -201,3 +202,22 @@ bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end)
return ret;
}
+
+#ifdef CONFIG_PROC_VMCORE
+static bool unaccepted_memory_vmcore_pfn_is_ram(struct vmcore_cb *cb,
+ unsigned long pfn)
+{
+ return !pfn_is_unaccepted_memory(pfn);
+}
+
+static struct vmcore_cb vmcore_cb = {
+ .pfn_is_ram = unaccepted_memory_vmcore_pfn_is_ram,
+};
+
+static int __init unaccepted_memory_init_kdump(void)
+{
+ register_vmcore_cb(&vmcore_cb);
+ return 0;
+}
+core_initcall(unaccepted_memory_init_kdump);
+#endif /* CONFIG_PROC_VMCORE */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index 9cb7bbfb4278..d166052eb2ce 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -288,8 +288,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct drm_i915_private *i915 =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_i915_private *i915 = shrinker->private_data;
unsigned long num_objects;
unsigned long count;
@@ -306,8 +305,8 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
if (num_objects) {
unsigned long avg = 2 * count / num_objects;
- i915->mm.shrinker.batch =
- max((i915->mm.shrinker.batch + avg) >> 1,
+ i915->mm.shrinker->batch =
+ max((i915->mm.shrinker->batch + avg) >> 1,
128ul /* default SHRINK_BATCH */);
}
@@ -317,8 +316,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct drm_i915_private *i915 =
- container_of(shrinker, struct drm_i915_private, mm.shrinker);
+ struct drm_i915_private *i915 = shrinker->private_data;
unsigned long freed;
sc->nr_scanned = 0;
@@ -430,12 +428,17 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr
void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
- i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
- i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
- i915->mm.shrinker.seeks = DEFAULT_SEEKS;
- i915->mm.shrinker.batch = 4096;
- drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker,
- "drm-i915_gem"));
+ i915->mm.shrinker = shrinker_alloc(0, "drm-i915_gem");
+ if (!i915->mm.shrinker) {
+ drm_WARN_ON(&i915->drm, 1);
+ } else {
+ i915->mm.shrinker->scan_objects = i915_gem_shrinker_scan;
+ i915->mm.shrinker->count_objects = i915_gem_shrinker_count;
+ i915->mm.shrinker->batch = 4096;
+ i915->mm.shrinker->private_data = i915;
+
+ shrinker_register(i915->mm.shrinker);
+ }
i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
@@ -451,7 +454,7 @@ void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
drm_WARN_ON(&i915->drm,
unregister_oom_notifier(&i915->mm.oom_notifier));
- unregister_shrinker(&i915->mm.shrinker);
+ shrinker_free(i915->mm.shrinker);
}
void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6a2a78c61f21..dd452c220df7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -163,7 +163,7 @@ struct i915_gem_mm {
struct notifier_block oom_notifier;
struct notifier_block vmap_notifier;
- struct shrinker shrinker;
+ struct shrinker *shrinker;
#ifdef CONFIG_MMU_NOTIFIER
/**
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 443bbc3ed750..2aae7d107f33 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -265,7 +265,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv)
if (ret)
goto err_deinit_vram;
- msm_gem_shrinker_init(ddev);
+ ret = msm_gem_shrinker_init(ddev);
+ if (ret)
+ goto err_msm_uninit;
if (priv->kms_init) {
ret = msm_drm_kms_init(dev, drv);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 7dbd0f06898b..cd5bf658df66 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -218,7 +218,7 @@ struct msm_drm_private {
} vram;
struct notifier_block vmap_notifier;
- struct shrinker shrinker;
+ struct shrinker *shrinker;
struct drm_atomic_state *pm_state;
@@ -280,7 +280,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan);
#endif
-void msm_gem_shrinker_init(struct drm_device *dev);
+int msm_gem_shrinker_init(struct drm_device *dev);
void msm_gem_shrinker_cleanup(struct drm_device *dev);
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c
index f38296ad8743..5a7d48c02c4b 100644
--- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
@@ -34,8 +34,7 @@ static bool can_block(struct shrink_control *sc)
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct msm_drm_private *priv =
- container_of(shrinker, struct msm_drm_private, shrinker);
+ struct msm_drm_private *priv = shrinker->private_data;
unsigned count = priv->lru.dontneed.count;
if (can_swap())
@@ -100,8 +99,7 @@ active_evict(struct drm_gem_object *obj)
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct msm_drm_private *priv =
- container_of(shrinker, struct msm_drm_private, shrinker);
+ struct msm_drm_private *priv = shrinker->private_data;
struct {
struct drm_gem_lru *lru;
bool (*shrink)(struct drm_gem_object *obj);
@@ -148,10 +146,11 @@ msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
struct shrink_control sc = {
.nr_to_scan = nr_to_scan,
};
- int ret;
+ unsigned long ret = SHRINK_STOP;
fs_reclaim_acquire(GFP_KERNEL);
- ret = msm_gem_shrinker_scan(&priv->shrinker, &sc);
+ if (priv->shrinker)
+ ret = msm_gem_shrinker_scan(priv->shrinker, &sc);
fs_reclaim_release(GFP_KERNEL);
return ret;
@@ -210,16 +209,24 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
*
* This function registers and sets up the msm shrinker.
*/
-void msm_gem_shrinker_init(struct drm_device *dev)
+int msm_gem_shrinker_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- priv->shrinker.count_objects = msm_gem_shrinker_count;
- priv->shrinker.scan_objects = msm_gem_shrinker_scan;
- priv->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&priv->shrinker, "drm-msm_gem"));
+
+ priv->shrinker = shrinker_alloc(0, "drm-msm_gem");
+ if (!priv->shrinker)
+ return -ENOMEM;
+
+ priv->shrinker->count_objects = msm_gem_shrinker_count;
+ priv->shrinker->scan_objects = msm_gem_shrinker_scan;
+ priv->shrinker->private_data = priv;
+
+ shrinker_register(priv->shrinker);
priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
+
+ return 0;
}
/**
@@ -232,8 +239,8 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- if (priv->shrinker.nr_deferred) {
+ if (priv->shrinker) {
WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
- unregister_shrinker(&priv->shrinker);
+ shrinker_free(priv->shrinker);
}
}
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 1e85656dc2f7..1ef38f60d5dc 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -119,7 +119,7 @@ struct panfrost_device {
struct mutex shrinker_lock;
struct list_head shrinker_list;
- struct shrinker shrinker;
+ struct shrinker *shrinker;
struct panfrost_devfreq pfdevfreq;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index b834777b409b..7cabf4e3d1f2 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -659,10 +659,14 @@ static int panfrost_probe(struct platform_device *pdev)
if (err < 0)
goto err_out1;
- panfrost_gem_shrinker_init(ddev);
+ err = panfrost_gem_shrinker_init(ddev);
+ if (err)
+ goto err_out2;
return 0;
+err_out2:
+ drm_dev_unregister(ddev);
err_out1:
pm_runtime_disable(pfdev->dev);
panfrost_device_fini(pfdev);
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index 13c0a8149c3a..7516b7ecf7fe 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -86,7 +86,7 @@ panfrost_gem_mapping_get(struct panfrost_gem_object *bo,
void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping);
void panfrost_gem_teardown_mappings_locked(struct panfrost_gem_object *bo);
-void panfrost_gem_shrinker_init(struct drm_device *dev);
+int panfrost_gem_shrinker_init(struct drm_device *dev);
void panfrost_gem_shrinker_cleanup(struct drm_device *dev);
#endif /* __PANFROST_GEM_H__ */
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index 6a71a2555f85..3d9f51bd48b6 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -18,8 +18,7 @@
static unsigned long
panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct panfrost_device *pfdev =
- container_of(shrinker, struct panfrost_device, shrinker);
+ struct panfrost_device *pfdev = shrinker->private_data;
struct drm_gem_shmem_object *shmem;
unsigned long count = 0;
@@ -65,8 +64,7 @@ unlock_mappings:
static unsigned long
panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
- struct panfrost_device *pfdev =
- container_of(shrinker, struct panfrost_device, shrinker);
+ struct panfrost_device *pfdev = shrinker->private_data;
struct drm_gem_shmem_object *shmem, *tmp;
unsigned long freed = 0;
@@ -97,13 +95,21 @@ panfrost_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
*
* This function registers and sets up the panfrost shrinker.
*/
-void panfrost_gem_shrinker_init(struct drm_device *dev)
+int panfrost_gem_shrinker_init(struct drm_device *dev)
{
struct panfrost_device *pfdev = dev->dev_private;
- pfdev->shrinker.count_objects = panfrost_gem_shrinker_count;
- pfdev->shrinker.scan_objects = panfrost_gem_shrinker_scan;
- pfdev->shrinker.seeks = DEFAULT_SEEKS;
- WARN_ON(register_shrinker(&pfdev->shrinker, "drm-panfrost"));
+
+ pfdev->shrinker = shrinker_alloc(0, "drm-panfrost");
+ if (!pfdev->shrinker)
+ return -ENOMEM;
+
+ pfdev->shrinker->count_object