author     Christian Brauner <brauner@kernel.org>  2023-08-23 13:06:55 +0200
committer  Christian Brauner <brauner@kernel.org>  2023-08-23 13:06:55 +0200
commit     3fb5a6562adef115d8a8c3e19cc9d5fae32e93c8 (patch)
tree       7a0cc61be2ba75aab26403f330da3140424b6b76 /drivers/gpu/drm/scheduler/sched_entity.c
parent     051178c366bbc1bf8b4aba5ca5519d7da453c95f (diff)
parent     59ba4fdd2d1f9dd7af98f5168c846150c9aec56d (diff)
Merge tag 'vfs-6.6-merge-2' of ssh://gitolite.kernel.org/pub/scm/fs/xfs/xfs-linux
Pull filesystem freezing updates from Darrick Wong:
New code for 6.6:
* Allow the kernel to initiate a freeze of a filesystem. The kernel
  and userspace can both hold a freeze on a filesystem at the same
  time; the freeze is not lifted until /both/ holders lift it. This
  will enable us to fix a longstanding bug in XFS online fsck. (A
  usage sketch follows after this message.)
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Message-Id: <20230822182604.GB11286@frogsfrogsfrogs>
Signed-off-by: Christian Brauner <brauner@kernel.org>
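
To illustrate the two-holder semantics described above: in the 6.6 series,
freeze_super() and thaw_super() take an enum freeze_holder argument
(FREEZE_HOLDER_KERNEL or FREEZE_HOLDER_USERSPACE), so a kernel-internal
user can freeze and thaw a superblock independently of a userspace
FIFREEZE. A minimal sketch, assuming those 6.6 prototypes; the helper
itself and its surrounding logic are hypothetical:

#include <linux/fs.h>

/* Hypothetical kernel-side user: quiesce @sb for an internal scan. */
static int example_scan_frozen(struct super_block *sb)
{
	int error;

	/* Take a kernel-initiated freeze; this can coexist with a
	 * userspace freeze of the same filesystem.
	 */
	error = freeze_super(sb, FREEZE_HOLDER_KERNEL);
	if (error)
		return error;

	/* ... the filesystem is quiesced here; do the online check ... */

	/* Drop only the kernel hold; a userspace freeze, if any, keeps
	 * the filesystem frozen until it too is lifted.
	 */
	return thaw_super(sb, FREEZE_HOLDER_KERNEL);
}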
Diffstat (limited to 'drivers/gpu/drm/scheduler/sched_entity.c')
 drivers/gpu/drm/scheduler/sched_entity.c | 41 +++++++++++++++++++++++++++++++++---------
 1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index b2bbc8a68b30..a42763e1429d 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -176,16 +176,32 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 {
 	struct drm_sched_job *job = container_of(cb, struct drm_sched_job,
 						 finish_cb);
-	int r;
+	unsigned long index;
 
 	dma_fence_put(f);
 
 	/* Wait for all dependencies to avoid data corruptions */
-	while (!xa_empty(&job->dependencies)) {
-		f = xa_erase(&job->dependencies, job->last_dependency++);
-		r = dma_fence_add_callback(f, &job->finish_cb,
-					   drm_sched_entity_kill_jobs_cb);
-		if (!r)
+	xa_for_each(&job->dependencies, index, f) {
+		struct drm_sched_fence *s_fence = to_drm_sched_fence(f);
+
+		if (s_fence && f == &s_fence->scheduled) {
+			/* The dependencies array had a reference on the scheduled
+			 * fence, and the finished fence refcount might have
+			 * dropped to zero. Use dma_fence_get_rcu() so we get
+			 * a NULL fence in that case.
+			 */
+			f = dma_fence_get_rcu(&s_fence->finished);
+
+			/* Now that we have a reference on the finished fence,
+			 * we can release the reference the dependencies array
+			 * had on the scheduled fence.
+			 */
+			dma_fence_put(&s_fence->scheduled);
+		}
+
+		xa_erase(&job->dependencies, index);
+		if (f && !dma_fence_add_callback(f, &job->finish_cb,
+						 drm_sched_entity_kill_jobs_cb))
 			return;
 
 		dma_fence_put(f);
@@ -415,8 +431,17 @@ static struct dma_fence *
 drm_sched_job_dependency(struct drm_sched_job *job,
 			 struct drm_sched_entity *entity)
 {
-	if (!xa_empty(&job->dependencies))
-		return xa_erase(&job->dependencies, job->last_dependency++);
+	struct dma_fence *f;
+
+	/* We keep the fence around, so we can iterate over all dependencies
+	 * in drm_sched_entity_kill_jobs_cb() to ensure all deps are signaled
+	 * before killing the job.
+	 */
+	f = xa_load(&job->dependencies, job->last_dependency);
+	if (f) {
+		job->last_dependency++;
+		return dma_fence_get(f);
+	}
 
 	if (job->sched->ops->prepare_job)
 		return job->sched->ops->prepare_job(job, entity);
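
For context on the driver-facing side of this change: entries land in
job->dependencies via drm_sched_job_add_dependency(), which consumes a
fence reference. After this patch, drm_sched_job_dependency() no longer
erases the entry; it takes an extra reference with dma_fence_get() and
leaves the xarray intact so drm_sched_entity_kill_jobs_cb() can still
walk every dependency. A minimal sketch of the caller side, assuming
drm_sched_job_add_dependency() from drm/scheduler; the helper itself is
hypothetical:

#include <drm/gpu_scheduler.h>
#include <linux/dma-fence.h>

/* Hypothetical driver helper: make @job wait for @fence to signal
 * before it is allowed to run.
 */
static int example_job_add_wait(struct drm_sched_job *job,
				struct dma_fence *fence)
{
	/* drm_sched_job_add_dependency() consumes a fence reference, so
	 * take one for the dependencies xarray. With this patch, that
	 * reference is dropped only once the dependency has signaled or
	 * the job is killed, which is what lets
	 * drm_sched_entity_kill_jobs_cb() iterate the array safely.
	 */
	return drm_sched_job_add_dependency(job, dma_fence_get(fence));
}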