From 34f50cc6441b7fee4a86495d5ef43da5d254bad9 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Wed, 2 Oct 2024 06:16:38 -0700
Subject: drm/sched: Use drm sched lockdep map for submit_wq
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Avoid leaking a lockdep map on each drm sched creation and destruction
by using a single lockdep map for all drm sched allocated submit_wq.

v2:
 - Use alloc_ordered_workqueue_lockdep_map (Tejun)

Cc: Luben Tuikov
Cc: Christian König
Signed-off-by: Matthew Brost
Reviewed-by: Nirmoy Das
Acked-by: Danilo Krummrich
Link: https://patchwork.freedesktop.org/patch/msgid/20241002131639.3425022-2-matthew.brost@intel.com
Signed-off-by: Maarten Lankhorst
---
 drivers/gpu/drm/scheduler/sched_main.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 6f27cab0b76d..eaef20f41786 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -87,6 +87,12 @@
 #define CREATE_TRACE_POINTS
 #include "gpu_scheduler_trace.h"
 
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map drm_sched_lockdep_map = {
+	.name = "drm_sched_lockdep_map"
+};
+#endif
+
 #define to_drm_sched_job(sched_job)		\
 		container_of((sched_job), struct drm_sched_job, queue_node)
 
@@ -1269,7 +1275,12 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
 		sched->submit_wq = submit_wq;
 		sched->own_submit_wq = false;
 	} else {
+#ifdef CONFIG_LOCKDEP
+		sched->submit_wq = alloc_ordered_workqueue_lockdep_map(name, 0,
+								       &drm_sched_lockdep_map);
+#else
 		sched->submit_wq = alloc_ordered_workqueue(name, 0);
+#endif
 		if (!sched->submit_wq)
 			return -ENOMEM;
 
--
cgit v1.2.3
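
The same pattern is useful for any driver that creates and destroys ordered
workqueues frequently: registering every workqueue against one static lockdep
map keeps lockdep from allocating, and then leaking, a fresh map on each
create/destroy cycle, while still validating work-item locking. A minimal
sketch of the idea using hypothetical my_drv names rather than the scheduler
code above:

	#include <linux/workqueue.h>
	#include <linux/lockdep.h>

	#ifdef CONFIG_LOCKDEP
	/* One map shared by every ordered workqueue this driver allocates. */
	static struct lockdep_map my_drv_wq_lockdep_map = {
		.name = "my_drv_wq_lockdep_map"
	};
	#endif

	static struct workqueue_struct *my_drv_alloc_ordered_wq(const char *name)
	{
	#ifdef CONFIG_LOCKDEP
		/* All instances share the one static map above. */
		return alloc_ordered_workqueue_lockdep_map(name, 0,
							   &my_drv_wq_lockdep_map);
	#else
		return alloc_ordered_workqueue(name, 0);
	#endif
	}

Workqueues created this way are still torn down with destroy_workqueue();
only the lockdep bookkeeping is shared.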

From 9286a191abe2ea01b34be577e8a09a412dcbb644 Mon Sep 17 00:00:00 2001
From: Matthew Brost
Date: Wed, 2 Oct 2024 06:16:39 -0700
Subject: drm/xe: Drop GuC submit_wq pool
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Now that drm sched uses a single lockdep map for all submit_wq, drop
the GuC submit_wq pool hack.

Signed-off-by: Matthew Brost
Reviewed-by: Nirmoy Das
Link: https://patchwork.freedesktop.org/patch/msgid/20241002131639.3425022-3-matthew.brost@intel.com
Signed-off-by: Maarten Lankhorst
Acked-by: Thomas Hellström
---
 drivers/gpu/drm/xe/xe_guc_submit.c | 60 +-------------------------------------
 drivers/gpu/drm/xe/xe_guc_types.h  |  7 -----
 2 files changed, 1 insertion(+), 66 deletions(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index fbbe6a487bbb..17c25f18e286 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -224,64 +224,11 @@ static bool exec_queue_killed_or_banned_or_wedged(struct xe_exec_queue *q)
 		     EXEC_QUEUE_STATE_BANNED));
 }
 
-#ifdef CONFIG_PROVE_LOCKING
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-	int i;
-
-	for (i = 0; i < NUM_SUBMIT_WQ; ++i) {
-		guc->submission_state.submit_wq_pool[i] =
-			alloc_ordered_workqueue("submit_wq", 0);
-		if (!guc->submission_state.submit_wq_pool[i])
-			goto err_free;
-	}
-
-	return 0;
-
-err_free:
-	while (i)
-		destroy_workqueue(guc->submission_state.submit_wq_pool[--i]);
-
-	return -ENOMEM;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-	int i;
-
-	for (i = 0; i < NUM_SUBMIT_WQ; ++i)
-		destroy_workqueue(guc->submission_state.submit_wq_pool[i]);
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-	int idx = guc->submission_state.submit_wq_idx++ % NUM_SUBMIT_WQ;
-
-	return guc->submission_state.submit_wq_pool[idx];
-}
-#else
-static int alloc_submit_wq(struct xe_guc *guc)
-{
-	return 0;
-}
-
-static void free_submit_wq(struct xe_guc *guc)
-{
-
-}
-
-static struct workqueue_struct *get_submit_wq(struct xe_guc *guc)
-{
-	return NULL;
-}
-#endif
-
 static void guc_submit_fini(struct drm_device *drm, void *arg)
 {
 	struct xe_guc *guc = arg;
 
 	xa_destroy(&guc->submission_state.exec_queue_lookup);
-	free_submit_wq(guc);
 }
 
 static void guc_submit_wedged_fini(void *arg)
@@ -337,10 +284,6 @@ int xe_guc_submit_init(struct xe_guc *guc, unsigned int num_ids)
 	if (err)
 		return err;
 
-	err = alloc_submit_wq(guc);
-	if (err)
-		return err;
-
 	gt->exec_queue_ops = &guc_exec_queue_ops;
 
 	xa_init(&guc->submission_state.exec_queue_lookup);
@@ -1452,8 +1395,7 @@ static int guc_exec_queue_init(struct xe_exec_queue *q)
 	timeout = (q->vm && xe_vm_in_lr_mode(q->vm)) ? MAX_SCHEDULE_TIMEOUT :
 		  msecs_to_jiffies(q->sched_props.job_timeout_ms);
 	err = xe_sched_init(&ge->sched, &drm_sched_ops, &xe_sched_ops,
-			    get_submit_wq(guc),
-			    q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
+			    NULL, q->lrc[0]->ring.size / MAX_JOB_SIZE_BYTES, 64,
			    timeout, guc_to_gt(guc)->ordered_wq, NULL,
			    q->name, gt_to_xe(q->gt)->drm.dev);
 	if (err)
diff --git a/drivers/gpu/drm/xe/xe_guc_types.h b/drivers/gpu/drm/xe/xe_guc_types.h
index 546ac6350a31..585f5c274f09 100644
--- a/drivers/gpu/drm/xe/xe_guc_types.h
+++ b/drivers/gpu/drm/xe/xe_guc_types.h
@@ -72,13 +72,6 @@ struct xe_guc {
 		atomic_t stopped;
 		/** @submission_state.lock: protects submission state */
 		struct mutex lock;
-#ifdef CONFIG_PROVE_LOCKING
-#define NUM_SUBMIT_WQ	256
-		/** @submission_state.submit_wq_pool: submission ordered workqueues pool */
-		struct workqueue_struct *submit_wq_pool[NUM_SUBMIT_WQ];
-		/** @submission_state.submit_wq_idx: submission ordered workqueue index */
-		int submit_wq_idx;
-#endif
 		/** @submission_state.enabled: submission is enabled */
 		bool enabled;
 	} submission_state;
--
cgit v1.2.3

From 301d194d01f3074efcf3a10eae116fcddb76788a Mon Sep 17 00:00:00 2001
From: Colin Ian King
Date: Tue, 17 Sep 2024 13:08:56 +0100
Subject: drm/nouveau/gsp: remove extraneous ; after mutex

The mutex field has two following semicolons, replace this with just
one semicolon.

Signed-off-by: Colin Ian King
Signed-off-by: Danilo Krummrich
Link: https://patchwork.freedesktop.org/patch/msgid/20240917120856.1877733-1-colin.i.king@gmail.com
---
 drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
index 9e6f39912368..a2055f2a014a 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/gsp.h
@@ -210,7 +210,7 @@ struct nvkm_gsp {
 	} *rm;
 
 	struct {
-		struct mutex mutex;;
+		struct mutex mutex;
 		struct idr idr;
 	} client_id;
 
--
cgit v1.2.3

From 7d1fd3638ee3a9f9bca4785fffb638ca19120718 Mon Sep 17 00:00:00 2001
From: Maíra Canal
Date: Fri, 4 Oct 2024 10:02:29 -0300
Subject: drm/v3d: Stop the active perfmon before being destroyed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

When running `kmscube` with one or more performance monitors enabled
via `GALLIUM_HUD`, the following kernel panic can occur:

[ 55.008324] Unable to handle kernel paging request at virtual address 00000000052004a4
[ 55.008368] Mem abort info:
[ 55.008377]   ESR = 0x0000000096000005
[ 55.008387]   EC = 0x25: DABT (current EL), IL = 32 bits
[ 55.008402]   SET = 0, FnV = 0
[ 55.008412]   EA = 0, S1PTW = 0
[ 55.008421]   FSC = 0x05: level 1 translation fault
[ 55.008434] Data abort info:
[ 55.008442]   ISV = 0, ISS = 0x00000005, ISS2 = 0x00000000
[ 55.008455]   CM = 0, WnR = 0, TnD = 0, TagAccess = 0
[ 55.008467]   GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0
[ 55.008481] user pgtable: 4k pages, 39-bit VAs, pgdp=00000001046c6000
[ 55.008497] [00000000052004a4] pgd=0000000000000000, p4d=0000000000000000, pud=0000000000000000
[ 55.008525] Internal error: Oops: 0000000096000005 [#1] PREEMPT SMP
[ 55.008542] Modules linked in: rfcomm [...] vc4 v3d snd_soc_hdmi_codec drm_display_helper gpu_sched drm_shmem_helper cec drm_dma_helper drm_kms_helper i2c_brcmstb drm drm_panel_orientation_quirks snd_soc_core snd_compress snd_pcm_dmaengine snd_pcm snd_timer snd backlight
[ 55.008799] CPU: 2 PID: 166 Comm: v3d_bin Tainted: G C 6.6.47+rpt-rpi-v8 #1 Debian 1:6.6.47-1+rpt1
[ 55.008824] Hardware name: Raspberry Pi 4 Model B Rev 1.5 (DT)
[ 55.008838] pstate: 20000005 (nzCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
[ 55.008855] pc : __mutex_lock.constprop.0+0x90/0x608
[ 55.008879] lr : __mutex_lock.constprop.0+0x58/0x608
[ 55.008895] sp : ffffffc080673cf0
[ 55.008904] x29: ffffffc080673cf0 x28: 0000000000000000 x27: ffffff8106188a28
[ 55.008926] x26: ffffff8101e78040 x25: ffffff8101baa6c0 x24: ffffffd9d989f148
[ 55.008947] x23: ffffffda1c2a4008 x22: 0000000000000002 x21: ffffffc080673d38
[ 55.008968] x20: ffffff8101238000 x19: ffffff8104f83188 x18: 0000000000000000
[ 55.008988] x17: 0000000000000000 x16: ffffffda1bd04d18 x15: 00000055bb08bc90
[ 55.009715] x14: 0000000000000000 x13: 0000000000000000 x12: ffffffda1bd4cbb0
[ 55.010433] x11: 00000000fa83b2da x10: 0000000000001a40 x9 : ffffffda1bd04d04
[ 55.011162] x8 : ffffff8102097b80 x7 : 0000000000000000 x6 : 00000000030a5857
[ 55.011880] x5 : 00ffffffffffffff x4 : 0300000005200470 x3 : 0300000005200470
[ 55.012598] x2 : ffffff8101238000 x1 : 0000000000000021 x0 : 0300000005200470
[ 55.013292] Call trace:
[ 55.013959]  __mutex_lock.constprop.0+0x90/0x608
[ 55.014646]  __mutex_lock_slowpath+0x1c/0x30
[ 55.015317]  mutex_lock+0x50/0x68
[ 55.015961]  v3d_perfmon_stop+0x40/0xe0 [v3d]
[ 55.016627]  v3d_bin_job_run+0x10c/0x2d8 [v3d]
[ 55.017282]  drm_sched_main+0x178/0x3f8 [gpu_sched]
[ 55.017921]  kthread+0x11c/0x128
[ 55.018554]  ret_from_fork+0x10/0x20
[ 55.019168] Code: f9400260 f1001c1f 54001ea9 927df000 (b9403401)
[ 55.019776] ---[ end trace 0000000000000000 ]---
[ 55.020411] note: v3d_bin[166] exited with preempt_count 1

This issue arises because, upon closing the file descriptor (which
happens when we interrupt `kmscube`), the active performance monitor is
not stopped. Although all perfmons are destroyed in
`v3d_perfmon_close_file()`, the active performance monitor's pointer
(`v3d->active_perfmon`) is still retained.

If `kmscube` is run again, the driver will attempt to stop the active
performance monitor using the stale pointer in `v3d->active_perfmon`.
However, this pointer is no longer valid because the previous process
has already terminated, and all performance monitors associated with it
have been destroyed and freed.

To fix this, when the active performance monitor belongs to a given
process, explicitly stop it before destroying and freeing it.

Cc: stable@vger.kernel.org # v5.15+
Closes: https://github.com/raspberrypi/linux/issues/6389
Fixes: 26a4dc29b74a ("drm/v3d: Expose performance counters to userspace")
Signed-off-by: Maíra Canal
Reviewed-by: Juan A. Suarez
Link: https://patchwork.freedesktop.org/patch/msgid/20241004130625.918580-2-mcanal@igalia.com
---
 drivers/gpu/drm/v3d/v3d_perfmon.c | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/v3d/v3d_perfmon.c b/drivers/gpu/drm/v3d/v3d_perfmon.c
index cd7f1eedf17f..00cd081d7873 100644
--- a/drivers/gpu/drm/v3d/v3d_perfmon.c
+++ b/drivers/gpu/drm/v3d/v3d_perfmon.c
@@ -306,6 +306,11 @@ void v3d_perfmon_open_file(struct v3d_file_priv *v3d_priv)
 static int v3d_perfmon_idr_del(int id, void *elem, void *data)
 {
 	struct v3d_perfmon *perfmon = elem;
+	struct v3d_dev *v3d = (struct v3d_dev *)data;
+
+	/* If the active perfmon is being destroyed, stop it first */
+	if (perfmon == v3d->active_perfmon)
+		v3d_perfmon_stop(v3d, perfmon, false);
 
 	v3d_perfmon_put(perfmon);
 
@@ -314,8 +319,10 @@ static int v3d_perfmon_idr_del(int id, void *elem, void *data)
 
 void v3d_perfmon_close_file(struct v3d_file_priv *v3d_priv)
 {
+	struct v3d_dev *v3d = v3d_priv->v3d;
+
 	mutex_lock(&v3d_priv->perfmon.lock);
-	idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, NULL);
+	idr_for_each(&v3d_priv->perfmon.idr, v3d_perfmon_idr_del, v3d);
 	idr_destroy(&v3d_priv->perfmon.idr);
 	mutex_unlock(&v3d_priv->perfmon.lock);
 	mutex_destroy(&v3d_priv->perfmon.lock);
--
cgit v1.2.3
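
The crash analysis above boils down to a teardown-ordering rule: if a per-file
object can also be the device's globally active object, it has to be quiesced
before the final put, and the destroy callback needs a way to reach the device
to check for that. Here the device pointer is threaded through the opaque data
cookie of idr_for_each(); the vc4 patch that follows applies the same pattern.
A rough sketch of the shape, with hypothetical my_* names instead of the v3d
symbols:

	#include <linux/idr.h>
	#include <linux/mutex.h>

	struct my_perfmon;

	struct my_dev {
		struct my_perfmon *active_perfmon;	/* monitor currently running on HW */
	};

	struct my_file {
		struct my_dev *dev;
		struct mutex perfmon_lock;
		struct idr perfmon_idr;
	};

	void my_perfmon_stop(struct my_dev *dev, struct my_perfmon *perfmon);
	void my_perfmon_put(struct my_perfmon *perfmon);

	static int my_perfmon_idr_del(int id, void *elem, void *data)
	{
		struct my_perfmon *perfmon = elem;
		struct my_dev *dev = data;		/* cookie from idr_for_each() */

		/*
		 * Stop the hardware monitor before freeing its backing object,
		 * so dev->active_perfmon never points at freed memory.
		 */
		if (perfmon == dev->active_perfmon)
			my_perfmon_stop(dev, perfmon);

		my_perfmon_put(perfmon);
		return 0;
	}

	void my_perfmon_close_file(struct my_file *file)
	{
		mutex_lock(&file->perfmon_lock);
		idr_for_each(&file->perfmon_idr, my_perfmon_idr_del, file->dev);
		idr_destroy(&file->perfmon_idr);
		mutex_unlock(&file->perfmon_lock);
	}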

From 0b2ad4f6f2bec74a5287d96cb2325a5e11706f22 Mon Sep 17 00:00:00 2001
From: Maíra Canal
Date: Fri, 4 Oct 2024 09:36:00 -0300
Subject: drm/vc4: Stop the active perfmon before being destroyed
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Upon closing the file descriptor, the active performance monitor is not
stopped. Although all perfmons are destroyed in
`vc4_perfmon_close_file()`, the active performance monitor's pointer
(`vc4->active_perfmon`) is still retained.

If we open a new file descriptor and submit a few jobs with performance
monitors, the driver will attempt to stop the active performance
monitor using the stale pointer in `vc4->active_perfmon`. However, this
pointer is no longer valid because the previous process has already
terminated, and all performance monitors associated with it have been
destroyed and freed.

To fix this, when the active performance monitor belongs to a given
process, explicitly stop it before destroying and freeing it.

Cc: stable@vger.kernel.org # v4.17+
Cc: Boris Brezillon
Cc: Juan A. Suarez Romero
Fixes: 65101d8c9108 ("drm/vc4: Expose performance counters to userspace")
Signed-off-by: Maíra Canal
Reviewed-by: Juan A. Suarez
Link: https://patchwork.freedesktop.org/patch/msgid/20241004123817.890016-2-mcanal@igalia.com
---
 drivers/gpu/drm/vc4/vc4_perfmon.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c
index c4ac2c946238..c00a5cc2316d 100644
--- a/drivers/gpu/drm/vc4/vc4_perfmon.c
+++ b/drivers/gpu/drm/vc4/vc4_perfmon.c
@@ -116,6 +116,11 @@ void vc4_perfmon_open_file(struct vc4_file *vc4file)
 static int vc4_perfmon_idr_del(int id, void *elem, void *data)
 {
 	struct vc4_perfmon *perfmon = elem;
+	struct vc4_dev *vc4 = (struct vc4_dev *)data;
+
+	/* If the active perfmon is being destroyed, stop it first */
+	if (perfmon == vc4->active_perfmon)
+		vc4_perfmon_stop(vc4, perfmon, false);
 
 	vc4_perfmon_put(perfmon);
 
@@ -130,7 +135,7 @@ void vc4_perfmon_close_file(struct vc4_file *vc4file)
 		return;
 
 	mutex_lock(&vc4file->perfmon.lock);
-	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL);
+	idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, vc4);
 	idr_destroy(&vc4file->perfmon.idr);
 	mutex_unlock(&vc4file->perfmon.lock);
 	mutex_destroy(&vc4file->perfmon.lock);
--
cgit v1.2.3

From 04e0481526e30ab8c7e7580033d2f88b7ef2da3f Mon Sep 17 00:00:00 2001
From: Yonatan Maman
Date: Tue, 8 Oct 2024 14:59:42 +0300
Subject: nouveau/dmem: Fix privileged error in copy engine channel

When `nouveau_dmem_copy_one` is called, the following error occurs:

[272146.675156] nouveau 0000:06:00.0: fifo: PBDMA9: 00000004 [HCE_PRIV] ch 1 00000300 00003386

This indicates that a copy push command triggered a Host Copy Engine
Privileged error on channel 1 (Copy Engine channel). To address this
issue, modify the Copy Engine channel to allow privileged push commands.

Fixes: 6de125383a5c ("drm/nouveau/fifo: expose runlist topology info on all chipsets")
Signed-off-by: Yonatan Maman
Co-developed-by: Gal Shalom
Signed-off-by: Gal Shalom
Reviewed-by: Ben Skeggs
Signed-off-by: Danilo Krummrich
Link: https://patchwork.freedesktop.org/patch/msgid/20241008115943.990286-2-ymaman@nvidia.com
---
 drivers/gpu/drm/nouveau/nouveau_drm.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index f6e78dba594f..34985771b2a2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -331,7 +331,7 @@ nouveau_accel_ce_init(struct nouveau_drm *drm)
 		return;
 	}
 
-	ret = nouveau_channel_new(&drm->client, false, runm, NvDmaFB, NvDmaTT, &drm->cechan);
+	ret = nouveau_channel_new(&drm->client, true, runm, NvDmaFB, NvDmaTT, &drm->cechan);
 	if (ret)
 		NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
 }
--
cgit v1.2.3

From 835745a377a4519decd1a36d6b926e369b3033e2 Mon Sep 17 00:00:00 2001
From: Yonatan Maman
Date: Tue, 8 Oct 2024 14:59:43 +0300
Subject: nouveau/dmem: Fix vulnerability in migrate_to_ram upon copy error

The `nouveau_dmem_copy_one` function ensures that the copy push command
is sent to the device firmware but does not track whether it was
executed successfully.

In the case of a copy error (e.g., firmware or hardware failure), the
copy push command will be sent via the firmware channel, and
`nouveau_dmem_copy_one` will likely report success, leading to the
`migrate_to_ram` function returning a dirty HIGH_USER page to the user.

This can result in a security vulnerability, as a HIGH_USER page that
may contain sensitive or corrupted data could be returned to the user.

To prevent this vulnerability, we allocate a zero page. Thus, in case
of an error, a non-dirty (zero) page will be returned to the user.

Fixes: 5be73b690875 ("drm/nouveau/dmem: device memory helpers for SVM")
Signed-off-by: Yonatan Maman
Co-developed-by: Gal Shalom
Signed-off-by: Gal Shalom
Reviewed-by: Ben Skeggs
Cc: stable@vger.kernel.org
Signed-off-by: Danilo Krummrich
Link: https://patchwork.freedesktop.org/patch/msgid/20241008115943.990286-3-ymaman@nvidia.com
---
 drivers/gpu/drm/nouveau/nouveau_dmem.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c
index 1f2d649f4b96..1a072568cef6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dmem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c
@@ -193,7 +193,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf)
 	if (!spage || !(src & MIGRATE_PFN_MIGRATE))
 		goto done;
 
-	dpage = alloc_page_vma(GFP_HIGHUSER, vmf->vma, vmf->address);
+	dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
 	if (!dpage)
 		goto done;
 
--
cgit v1.2.3
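
The precaution generalizes: whenever a freshly allocated page can end up
mapped into userspace even though the device copy into it is only queued and
never confirmed, allocating it pre-zeroed means the failure path exposes zeros
rather than stale allocator contents. A condensed, hypothetical sketch of that
fault-path shape (not the nouveau code, which lives in
nouveau_dmem_migrate_to_ram()):

	#include <linux/mm.h>
	#include <linux/gfp.h>

	/* Hypothetical helpers standing in for the driver's copy/finish steps. */
	int my_copy_from_device(struct page *spage, struct page *dpage);
	vm_fault_t my_finish_migration(struct vm_fault *vmf, struct page *dpage);

	static vm_fault_t my_migrate_to_ram(struct vm_fault *vmf, struct page *spage)
	{
		struct page *dpage;

		/*
		 * Pre-zeroed destination: if the device copy is pushed but
		 * never executes, userspace sees zeros instead of whatever
		 * happened to be in the page.
		 */
		dpage = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vmf->vma, vmf->address);
		if (!dpage)
			return VM_FAULT_SIGBUS;

		my_copy_from_device(spage, dpage);	/* completion not tracked */

		return my_finish_migration(vmf, dpage);
	}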

From fcddc71ec7ecf15b4df3c41288c9cf0b8e886111 Mon Sep 17 00:00:00 2001
From: Janne Grunau
Date: Sun, 6 Oct 2024 19:49:45 +0200
Subject: drm/fbdev-dma: Only cleanup deferred I/O if necessary

Commit 5a498d4d06d6 ("drm/fbdev-dma: Only install deferred I/O if
necessary") initializes deferred I/O only if it is used.
drm_fbdev_dma_fb_destroy() however calls fb_deferred_io_cleanup()
unconditionally with struct fb_info.fbdefio == NULL. KASAN with the
out-of-tree Apple silicon display driver posts following warning from
__flush_work() of a random struct work_struct instead of the expected
NULL pointer derefs.

[ 22.053799] ------------[ cut here ]------------
[ 22.054832] WARNING: CPU: 2 PID: 1 at kernel/workqueue.c:4177 __flush_work+0x4d8/0x580
[ 22.056597] Modules linked in: uhid bnep uinput nls_ascii ip6_tables ip_tables i2c_dev loop fuse dm_multipath nfnetlink zram hid_magicmouse btrfs xor xor_neon brcmfmac_wcc raid6_pq hci_bcm4377 bluetooth brcmfmac hid_apple brcmutil nvmem_spmi_mfd simple_mfd_spmi dockchannel_hid cfg80211 joydev regmap_spmi nvme_apple ecdh_generic ecc macsmc_hid rfkill dwc3 appledrm snd_soc_macaudio macsmc_power nvme_core apple_isp phy_apple_atc apple_sart apple_rtkit_helper apple_dockchannel tps6598x macsmc_hwmon snd_soc_cs42l84 videobuf2_v4l2 spmi_apple_controller nvmem_apple_efuses videobuf2_dma_sg apple_z2 videobuf2_memops spi_nor panel_summit videobuf2_common asahi videodev pwm_apple apple_dcp snd_soc_apple_mca apple_admac spi_apple clk_apple_nco i2c_pasemi_platform snd_pcm_dmaengine mc i2c_pasemi_core mux_core ofpart adpdrm drm_dma_helper apple_dart apple_soc_cpufreq leds_pwm phram
[ 22.073768] CPU: 2 UID: 0 PID: 1 Comm: systemd-shutdow Not tainted 6.11.2-asahi+ #asahi-dev
[ 22.075612] Hardware name: Apple MacBook Pro (13-inch, M2, 2022) (DT)
[ 22.077032] pstate: 01400005 (nzcv daif +PAN -UAO -TCO +DIT -SSBS BTYPE=--)
[ 22.078567] pc : __flush_work+0x4d8/0x580
[ 22.079471] lr : __flush_work+0x54/0x580
[ 22.080345] sp : ffffc000836ef820
[ 22.081089] x29: ffffc000836ef880 x28: 0000000000000000 x27: ffff80002ddb7128
[ 22.082678] x26: dfffc00000000000 x25: 1ffff000096f0c57 x24: ffffc00082d3e358
[ 22.084263] x23: ffff80004b7862b8 x22: dfffc00000000000 x21: ffff80005aa1d470
[ 22.085855] x20: ffff80004b786000 x19: ffff80004b7862a0 x18: 0000000000000000
[ 22.087439] x17: 0000000000000000 x16: 0000000000000000 x15: 0000000000000005
[ 22.089030] x14: 1ffff800106ddf0a x13: 0000000000000000 x12: 0000000000000000
[ 22.090618] x11: ffffb800106ddf0f x10: dfffc00000000000 x9 : 1ffff800106ddf0e
[ 22.092206] x8 : 0000000000000000 x7 : aaaaaaaaaaaaaaaa x6 : 0000000000000001
[ 22.093790] x5 : ffffc000836ef728 x4 : 0000000000000000 x3 : 0000000000000020
[ 22.095368] x2 : 0000000000000008 x1 : 00000000000000aa x0 : 0000000000000000
[ 22.096955] Call trace:
[ 22.097505]  __flush_work+0x4d8/0x580
[ 22.098330]  flush_delayed_work+0x80/0xb8
[ 22.099231]  fb_deferred_io_cleanup+0x3c/0x130
[ 22.100217]  drm_fbdev_dma_fb_destroy+0x6c/0xe0 [drm_dma_helper]
[ 22.101559]  unregister_framebuffer+0x210/0x2f0
[ 22.102575]  drm_fb_helper_unregister_info+0x48/0x60
[ 22.103683]  drm_fbdev_dma_client_unregister+0x4c/0x80 [drm_dma_helper]
[ 22.105147]  drm_client_dev_unregister+0x1cc/0x230
[ 22.106217]  drm_dev_unregister+0x58/0x570
[ 22.107125]  apple_drm_unbind+0x50/0x98 [appledrm]
[ 22.108199]  component_del+0x1f8/0x3a8
[ 22.109042]  dcp_platform_shutdown+0x24/0x38 [apple_dcp]
[ 22.110357]  platform_shutdown+0x70/0x90
[ 22.111219]  device_shutdown+0x368/0x4d8
[ 22.112095]  kernel_restart+0x6c/0x1d0
[ 22.112946]  __arm64_sys_reboot+0x1c8/0x328
[ 22.113868]  invoke_syscall+0x78/0x1a8
[ 22.114703]  do_el0_svc+0x124/0x1a0
[ 22.115498]  el0_svc+0x3c/0xe0
[ 22.116181]  el0t_64_sync_handler+0x70/0xc0
[ 22.117110]  el0t_64_sync+0x190/0x198
[ 22.117931] ---[ end trace 0000000000000000 ]---

Signed-off-by: Janne Grunau
Fixes: 5a498d4d06d6 ("drm/fbdev-dma: Only install deferred I/O if necessary")
Reviewed-by: Thomas Zimmermann
Reviewed-by: Linus Walleij
Signed-off-by: Thomas Zimmermann
Link: https://patchwork.freedesktop.org/patch/msgid/ZwLNuZL-8Gh5UUQb@robin
---
 drivers/gpu/drm/drm_fbdev_dma.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'drivers')

diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index b0602c4f3628..51c2d742d199 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -50,7 +50,8 @@ static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
 	if (!fb_helper->dev)
 		return;
 
-	fb_deferred_io_cleanup(info);
+	if (info->fbdefio)
+		fb_deferred_io_cleanup(info);
 	drm_fb_helper_fini(fb_helper);
 
 	drm_client_buffer_vunmap(fb_helper->buffer);
--
cgit v1.2.3
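
A general takeaway from this fix: once a subsystem installs deferred I/O only
conditionally, every teardown path has to mirror that condition, and struct
fb_info.fbdefio doubles as the "was it installed?" flag. A small sketch of the
paired setup/teardown under that assumption, with hypothetical my_* names:

	#include <linux/fb.h>

	/* Hypothetical deferred-I/O callback and descriptor; sketch only. */
	static void my_deferred_io(struct fb_info *info, struct list_head *pagereflist);

	static struct fb_deferred_io my_defio = {
		.delay		= HZ / 20,
		.deferred_io	= my_deferred_io,
	};

	static int my_fb_setup(struct fb_info *info, bool needs_deferred_io)
	{
		if (!needs_deferred_io)
			return 0;		/* info->fbdefio stays NULL */

		info->fbdefio = &my_defio;
		return fb_deferred_io_init(info);
	}

	static void my_fb_destroy(struct fb_info *info)
	{
		/* Tear down only what setup actually installed. */
		if (info->fbdefio)
			fb_deferred_io_cleanup(info);
	}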