author     Dave Airlie <airlied@redhat.com>    2026-01-01 16:39:19 +1000
committer  Dave Airlie <airlied@redhat.com>    2026-01-01 16:39:54 +1000
commit     1054f19572acbbec80e2339dbf61f2b40ffb918c (patch)
tree       33945258ded8a2050db27c1d4e0e0a8381c1399a
parent     f8f9c1f4d0c7a64600e2ca312dec824a0bc2f1da (diff)
parent     bed2a6bd20681aacfb063015c1edfab6f58a333e (diff)
download   linux-1054f19572acbbec80e2339dbf61f2b40ffb918c.tar.gz
           linux-1054f19572acbbec80e2339dbf61f2b40ffb918c.tar.bz2
           linux-1054f19572acbbec80e2339dbf61f2b40ffb918c.zip
Merge tag 'drm-xe-fixes-2025-12-30' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes
Core Changes:
- Ensure an SVM device memory allocation is idle before migration (Thomas)

Driver Changes:
- Fix an SVM debug printout (Thomas)
- Use READ_ONCE() / WRITE_ONCE() for g2h_fence (Jonathan)

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Thomas Hellstrom <thomas.hellstrom@linux.intel.com>
Link: https://patch.msgid.link/aVOTf6-whmkgrUuq@fedora
-rw-r--r--   drivers/gpu/drm/drm_pagemap.c    17
-rw-r--r--   drivers/gpu/drm/xe/xe_guc_ct.c   14
-rw-r--r--   drivers/gpu/drm/xe/xe_migrate.c  25
-rw-r--r--   drivers/gpu/drm/xe/xe_migrate.h   6
-rw-r--r--   drivers/gpu/drm/xe/xe_svm.c      51
-rw-r--r--   include/drm/drm_pagemap.h        17
6 files changed, 99 insertions(+), 31 deletions(-)
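
The heart of the "idle before migration" fix is the reservation-object check added to xe_drm_pagemap_populate_mm() in the xe_svm.c hunk below. As a minimal sketch of that pattern, assuming the caller holds the BO's reservation lock (the demo_* name is illustrative, not from the patch):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/sched.h>

/* Return a fence covering all pending kernel work on @resv, or NULL if
 * the object is already idle (or if we had to wait synchronously). */
static struct dma_fence *demo_get_pre_migrate_fence(struct dma_resv *resv)
{
	struct dma_fence *fence = NULL;

	if (dma_resv_test_signaled(resv, DMA_RESV_USAGE_KERNEL))
		return NULL;	/* clears / async evictions already done */

	/* Collapse all pending kernel fences into a single fence. */
	if (dma_resv_get_singleton(resv, DMA_RESV_USAGE_KERNEL, &fence)) {
		/* Allocation failure: degrade to a synchronous wait. */
		dma_resv_wait_timeout(resv, DMA_RESV_USAGE_KERNEL, false,
				      MAX_SCHEDULE_TIMEOUT);
		return NULL;
	}

	/* Software fences need signaling enabled before anyone waits. */
	if (fence)
		dma_fence_enable_sw_signaling(fence);

	return fence;
}

The resulting fence is stashed in the devmem allocation and handed to the copy callbacks, which either pipeline behind it or wait on it.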
diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c
index 37d7cfbbb3e8..06c1bd8fc4d1 100644
--- a/drivers/gpu/drm/drm_pagemap.c
+++ b/drivers/gpu/drm/drm_pagemap.c
@@ -3,6 +3,7 @@
* Copyright © 2024-2025 Intel Corporation
*/
+#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
@@ -408,10 +409,14 @@ int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
drm_pagemap_get_devmem_page(page, zdd);
}
- err = ops->copy_to_devmem(pages, pagemap_addr, npages);
+ err = ops->copy_to_devmem(pages, pagemap_addr, npages,
+ devmem_allocation->pre_migrate_fence);
if (err)
goto err_finalize;
+ dma_fence_put(devmem_allocation->pre_migrate_fence);
+ devmem_allocation->pre_migrate_fence = NULL;
+
/* Upon success bind devmem allocation to range and zdd */
devmem_allocation->timeslice_expiration = get_jiffies_64() +
msecs_to_jiffies(timeslice_ms);
@@ -596,7 +601,7 @@ retry:
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(src[i]);
- err = ops->copy_to_ram(pages, pagemap_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;
@@ -732,7 +737,7 @@ static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
for (i = 0; i < npages; ++i)
pages[i] = migrate_pfn_to_page(migrate.src[i]);
- err = ops->copy_to_ram(pages, pagemap_addr, npages);
+ err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
if (err)
goto err_finalize;
@@ -813,11 +818,14 @@ EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
* @ops: Pointer to the operations structure for GPU SVM device memory
* @dpagemap: The struct drm_pagemap we're allocating from.
* @size: Size of device memory allocation
+ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
+ * (May be NULL).
*/
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size)
+ struct drm_pagemap *dpagemap, size_t size,
+ struct dma_fence *pre_migrate_fence)
{
init_completion(&devmem_allocation->detached);
devmem_allocation->dev = dev;
@@ -825,6 +833,7 @@ void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
devmem_allocation->ops = ops;
devmem_allocation->dpagemap = dpagemap;
devmem_allocation->size = size;
+ devmem_allocation->pre_migrate_fence = pre_migrate_fence;
}
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
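
Taken together, the drm_pagemap.c hunks above define a clear ownership hand-off for the new fence. A compact sketch of that lifetime, under the assumption that init takes over the caller's reference (demo_* names are illustrative):

#include <linux/dma-fence.h>

struct demo_devmem {
	struct dma_fence *pre_migrate_fence;
};

/* init: the allocation takes over the caller's fence reference */
static void demo_init(struct demo_devmem *d, struct dma_fence *fence)
{
	d->pre_migrate_fence = fence;	/* may be NULL */
}

/* migrate: hand the fence to the copy, drop our reference on success */
static int demo_migrate(struct demo_devmem *d,
			int (*copy)(struct dma_fence *fence))
{
	int err = copy(d->pre_migrate_fence);

	if (err)
		return err;	/* release path drops the fence instead */

	dma_fence_put(d->pre_migrate_fence);
	d->pre_migrate_fence = NULL;
	return 0;
}

/* release: drop whatever reference is still held (NULL-safe) */
static void demo_release(struct demo_devmem *d)
{
	dma_fence_put(d->pre_migrate_fence);
}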
diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c
index 4ac434ad216f..a5019d1e741b 100644
--- a/drivers/gpu/drm/xe/xe_guc_ct.c
+++ b/drivers/gpu/drm/xe/xe_guc_ct.c
@@ -104,7 +104,9 @@ static void g2h_fence_cancel(struct g2h_fence *g2h_fence)
{
g2h_fence->cancel = true;
g2h_fence->fail = true;
- g2h_fence->done = true;
+
+ /* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
+ WRITE_ONCE(g2h_fence->done, true);
}
static bool g2h_fence_needs_alloc(struct g2h_fence *g2h_fence)
@@ -1203,10 +1205,13 @@ retry_same_fence:
return ret;
}
- ret = wait_event_timeout(ct->g2h_fence_wq, g2h_fence.done, HZ);
+ /* READ_ONCEs pair with WRITE_ONCEs in parse_g2h_response
+ * and g2h_fence_cancel.
+ */
+ ret = wait_event_timeout(ct->g2h_fence_wq, READ_ONCE(g2h_fence.done), HZ);
if (!ret) {
LNL_FLUSH_WORK(&ct->g2h_worker);
- if (g2h_fence.done) {
+ if (READ_ONCE(g2h_fence.done)) {
xe_gt_warn(gt, "G2H fence %u, action %04x, done\n",
g2h_fence.seqno, action[0]);
ret = 1;
@@ -1454,7 +1459,8 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len)
g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN);
- g2h_fence->done = true;
+ /* WRITE_ONCE pairs with READ_ONCEs in guc_ct_send_recv. */
+ WRITE_ONCE(g2h_fence->done, true);
smp_mb();
wake_up_all(&ct->g2h_fence_wq);
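
The g2h_fence changes above are an instance of a generic done-flag pattern: WRITE_ONCE()/READ_ONCE() mark the flag as concurrently accessed so the compiler cannot tear or cache it, and a barrier orders the write before the wakeup. A self-contained sketch of the same pattern (names are illustrative, not from the driver):

#include <linux/errno.h>
#include <linux/wait.h>

struct demo_request {
	bool done;
};

static void demo_complete(struct demo_request *req, wait_queue_head_t *wq)
{
	WRITE_ONCE(req->done, true);	/* pairs with READ_ONCE() in demo_wait() */
	smp_mb();			/* flag visible before waking waiters */
	wake_up_all(wq);
}

static int demo_wait(struct demo_request *req, wait_queue_head_t *wq)
{
	/* Without READ_ONCE() the compiler may cache req->done across
	 * iterations of the wait loop and miss the concurrent write. */
	if (!wait_event_timeout(*wq, READ_ONCE(req->done), HZ))
		return -ETIMEDOUT;
	return 0;
}

The driver additionally re-checks the flag after flushing the G2H worker on timeout, which the sketch omits.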
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 2184af413b91..5a95b08a4723 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -2062,6 +2062,7 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
unsigned long sram_offset,
struct drm_pagemap_addr *sram_addr,
u64 vram_addr,
+ struct dma_fence *deps,
const enum xe_migrate_copy_dir dir)
{
struct xe_gt *gt = m->tile->primary_gt;
@@ -2150,6 +2151,14 @@ static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB);
+ if (deps && !dma_fence_is_signaled(deps)) {
+ dma_fence_get(deps);
+ err = drm_sched_job_add_dependency(&job->drm, deps);
+ if (err)
+ dma_fence_wait(deps, false);
+ err = 0;
+ }
+
mutex_lock(&m->job_mutex);
xe_sched_job_arm(job);
fence = dma_fence_get(&job->drm.s_fence->finished);
@@ -2175,6 +2184,8 @@ err:
* @npages: Number of pages to migrate.
* @src_addr: Array of DMA information (source of migrate)
* @dst_addr: Device physical address of VRAM (destination of migrate)
+ * @deps: struct dma_fence representing the dependencies that need
+ * to be signaled before migration.
*
* Copy from an array dma addresses to a VRAM device physical address
*
@@ -2184,10 +2195,11 @@ err:
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
struct drm_pagemap_addr *src_addr,
- u64 dst_addr)
+ u64 dst_addr,
+ struct dma_fence *deps)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
- XE_MIGRATE_COPY_TO_VRAM);
+ deps, XE_MIGRATE_COPY_TO_VRAM);
}
/**
@@ -2196,6 +2208,8 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
* @npages: Number of pages to migrate.
* @src_addr: Device physical address of VRAM (source of migrate)
* @dst_addr: Array of DMA information (destination of migrate)
+ * @deps: struct dma_fence representing the dependencies that need
+ * to be signaled before migration.
*
* Copy from a VRAM device physical address to an array dma addresses
*
@@ -2205,10 +2219,11 @@ struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- struct drm_pagemap_addr *dst_addr)
+ struct drm_pagemap_addr *dst_addr,
+ struct dma_fence *deps)
{
return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
- XE_MIGRATE_COPY_TO_SRAM);
+ deps, XE_MIGRATE_COPY_TO_SRAM);
}
static void xe_migrate_dma_unmap(struct xe_device *xe,
@@ -2384,7 +2399,7 @@ int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
__fence = xe_migrate_vram(m, current_bytes,
(unsigned long)buf & ~PAGE_MASK,
&pagemap_addr[current_page],
- vram_addr, write ?
+ vram_addr, NULL, write ?
XE_MIGRATE_COPY_TO_VRAM :
XE_MIGRATE_COPY_TO_SRAM);
if (IS_ERR(__fence)) {
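
The dependency hookup added to xe_migrate_vram() above relies on a subtle detail of the scheduler API: drm_sched_job_add_dependency() consumes one fence reference whether it succeeds or fails, which is why the code takes an extra reference first and, on failure, falls back to waiting inline rather than aborting the migration. Sketched in isolation (the demo_* name is illustrative):

#include <linux/dma-fence.h>
#include <drm/gpu_scheduler.h>

static void demo_add_dep_or_wait(struct drm_sched_job *job,
				 struct dma_fence *deps)
{
	if (!deps || dma_fence_is_signaled(deps))
		return;		/* nothing to order behind */

	dma_fence_get(deps);	/* reference consumed by the call below */
	if (drm_sched_job_add_dependency(job, deps))
		dma_fence_wait(deps, false);	/* fallback: wait inline */
}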
diff --git a/drivers/gpu/drm/xe/xe_migrate.h b/drivers/gpu/drm/xe/xe_migrate.h
index 260e298e5dd7..b76441f062b4 100644
--- a/drivers/gpu/drm/xe/xe_migrate.h
+++ b/drivers/gpu/drm/xe/xe_migrate.h
@@ -116,12 +116,14 @@ int xe_migrate_init(struct xe_migrate *m);
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
unsigned long npages,
struct drm_pagemap_addr *src_addr,
- u64 dst_addr);
+ u64 dst_addr,
+ struct dma_fence *deps);
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
unsigned long npages,
u64 src_addr,
- struct drm_pagemap_addr *dst_addr);
+ struct drm_pagemap_addr *dst_addr,
+ struct dma_fence *deps);
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
struct xe_bo *src_bo,
diff --git a/drivers/gpu/drm/xe/xe_svm.c b/drivers/gpu/drm/xe/xe_svm.c
index 55c5a0eb82e1..f97e0af6a9b0 100644
--- a/drivers/gpu/drm/xe/xe_svm.c
+++ b/drivers/gpu/drm/xe/xe_svm.c
@@ -476,7 +476,8 @@ static void xe_svm_copy_us_stats_incr(struct xe_gt *gt,
static int xe_svm_copy(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages, const enum xe_svm_copy_dir dir)
+ unsigned long npages, const enum xe_svm_copy_dir dir,
+ struct dma_fence *pre_migrate_fence)
{
struct xe_vram_region *vr = NULL;
struct xe_gt *gt = NULL;
@@ -565,7 +566,8 @@ static int xe_svm_copy(struct page **pages,
__fence = xe_migrate_from_vram(vr->migrate,
i - pos + incr,
vram_addr,
- &pagemap_addr[pos]);
+ &pagemap_addr[pos],
+ pre_migrate_fence);
} else {
vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
@@ -574,13 +576,14 @@ static int xe_svm_copy(struct page **pages,
__fence = xe_migrate_to_vram(vr->migrate,
i - pos + incr,
&pagemap_addr[pos],
- vram_addr);
+ vram_addr,
+ pre_migrate_fence);
}
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
}
-
+ pre_migrate_fence = NULL;
dma_fence_put(fence);
fence = __fence;
}
@@ -603,20 +606,22 @@ static int xe_svm_copy(struct page **pages,
vram_addr, (u64)pagemap_addr[pos].addr, 1);
__fence = xe_migrate_from_vram(vr->migrate, 1,
vram_addr,
- &pagemap_addr[pos]);
+ &pagemap_addr[pos],
+ pre_migrate_fence);
} else {
vm_dbg(&xe->drm,
"COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
(u64)pagemap_addr[pos].addr, vram_addr, 1);
__fence = xe_migrate_to_vram(vr->migrate, 1,
&pagemap_addr[pos],
- vram_addr);
+ vram_addr,
+ pre_migrate_fence);
}
if (IS_ERR(__fence)) {
err = PTR_ERR(__fence);
goto err_out;
}
-
+ pre_migrate_fence = NULL;
dma_fence_put(fence);
fence = __fence;
}
@@ -629,6 +634,8 @@ err_out:
dma_fence_wait(fence, false);
dma_fence_put(fence);
}
+ if (pre_migrate_fence)
+ dma_fence_wait(pre_migrate_fence, false);
/*
* XXX: We can't derive the GT here (or anywhere in this function, but
@@ -645,16 +652,20 @@ err_out:
static int xe_svm_copy_to_devmem(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages)
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence)
{
- return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_VRAM,
+ pre_migrate_fence);
}
static int xe_svm_copy_to_ram(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages)
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence)
{
- return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM);
+ return xe_svm_copy(pages, pagemap_addr, npages, XE_SVM_COPY_TO_SRAM,
+ pre_migrate_fence);
}
static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
@@ -667,6 +678,7 @@ static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
struct xe_bo *bo = to_xe_bo(devmem_allocation);
struct xe_device *xe = xe_bo_device(bo);
+ dma_fence_put(devmem_allocation->pre_migrate_fence);
xe_bo_put_async(bo);
xe_pm_runtime_put(xe);
}
@@ -861,6 +873,7 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long timeslice_ms)
{
struct xe_vram_region *vr = container_of(dpagemap, typeof(*vr), dpagemap);
+ struct dma_fence *pre_migrate_fence = NULL;
struct xe_device *xe = vr->xe;
struct device *dev = xe->drm.dev;
struct drm_buddy_block *block;
@@ -887,8 +900,20 @@ static int xe_drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
break;
}
+ /* Ensure that any clearing or async eviction will complete before migration. */
+ if (!dma_resv_test_signaled(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL)) {
+ err = dma_resv_get_singleton(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ &pre_migrate_fence);
+ if (err)
+ dma_resv_wait_timeout(bo->ttm.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ else if (pre_migrate_fence)
+ dma_fence_enable_sw_signaling(pre_migrate_fence);
+ }
+
drm_pagemap_devmem_init(&bo->devmem_allocation, dev, mm,
- &dpagemap_devmem_ops, dpagemap, end - start);
+ &dpagemap_devmem_ops, dpagemap, end - start,
+ pre_migrate_fence);
blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
list_for_each_entry(block, blocks, link)
@@ -941,7 +966,7 @@ bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vm
xe_assert(vm->xe, IS_DGFX(vm->xe));
if (xe_svm_range_in_vram(range)) {
- drm_info(&vm->xe->drm, "Range is already in VRAM\n");
+ drm_dbg(&vm->xe->drm, "Range is already in VRAM\n");
return false;
}
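
The pre_migrate_fence = NULL lines added in the xe_svm_copy() hunks above implement a consume-once pattern: only the first migration job carries the dependency, since later jobs on the same queue are ordered behind it anyway, and the error path must still wait on an unconsumed fence so the allocation is genuinely idle before teardown. A sketch of that shape (demo_* helpers are hypothetical):

#include <linux/dma-fence.h>
#include <linux/err.h>

/* Hypothetical: launch one copy batch with @dep as a scheduler
 * dependency for that job; returns the job's finished fence. */
static struct dma_fence *demo_launch(int batch, struct dma_fence *dep);

static int demo_copy_batches(struct dma_fence *pre_migrate_fence, int nbatches)
{
	struct dma_fence *fence = NULL, *__fence;
	int err = 0, i;

	for (i = 0; i < nbatches; i++) {
		__fence = demo_launch(i, pre_migrate_fence);
		if (IS_ERR(__fence)) {
			err = PTR_ERR(__fence);
			goto err_out;
		}
		pre_migrate_fence = NULL;	/* consumed by the first job */
		dma_fence_put(fence);
		fence = __fence;
	}

err_out:
	if (fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}
	/* If no job consumed the dependency, wait for it here; the
	 * reference itself stays owned by the devmem allocation. */
	if (pre_migrate_fence)
		dma_fence_wait(pre_migrate_fence, false);
	return err;
}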
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index f6e7e234c089..70a7991f784f 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -8,6 +8,7 @@
#define NR_PAGES(order) (1U << (order))
+struct dma_fence;
struct drm_pagemap;
struct drm_pagemap_zdd;
struct device;
@@ -174,6 +175,8 @@ struct drm_pagemap_devmem_ops {
* @pages: Pointer to array of device memory pages (destination)
* @pagemap_addr: Pointer to array of DMA information (source)
* @npages: Number of pages to copy
+ * @pre_migrate_fence: dma-fence to wait for before migration starts.
+ * May be NULL.
*
* Copy pages to device memory. If the order of a @pagemap_addr entry
* is greater than 0, the entry is populated but subsequent entries
@@ -183,13 +186,16 @@ struct drm_pagemap_devmem_ops {
*/
int (*copy_to_devmem)(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages);
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence);
/**
* @copy_to_ram: Copy to system RAM (required for migration)
* @pages: Pointer to array of device memory pages (source)
* @pagemap_addr: Pointer to array of DMA information (destination)
* @npages: Number of pages to copy
+ * @pre_migrate_fence: dma-fence to wait for before migration starts.
+ * May be NULL.
*
* Copy pages to system RAM. If the order of a @pagemap_addr entry
* is greater than 0, the entry is populated but subsequent entries
@@ -199,7 +205,8 @@ struct drm_pagemap_devmem_ops {
*/
int (*copy_to_ram)(struct page **pages,
struct drm_pagemap_addr *pagemap_addr,
- unsigned long npages);
+ unsigned long npages,
+ struct dma_fence *pre_migrate_fence);
};
/**
@@ -212,6 +219,8 @@ struct drm_pagemap_devmem_ops {
* @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
* @size: Size of device memory allocation
* @timeslice_expiration: Timeslice expiration in jiffies
+ * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
+ * (May be NULL).
*/
struct drm_pagemap_devmem {
struct device *dev;
@@ -221,6 +230,7 @@ struct drm_pagemap_devmem {
struct drm_pagemap *dpagemap;
size_t size;
u64 timeslice_expiration;
+ struct dma_fence *pre_migrate_fence;
};
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
@@ -238,7 +248,8 @@ struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
struct device *dev, struct mm_struct *mm,
const struct drm_pagemap_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size);
+ struct drm_pagemap *dpagemap, size_t size,
+ struct dma_fence *pre_migrate_fence);
int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
unsigned long start, unsigned long end,
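
For third-party implementers of drm_pagemap_devmem_ops, the new parameter is easy to adopt. A minimal sketch for a hypothetical driver "foo" that has no scheduler to pipeline behind the fence and therefore simply waits up front:

#include <linux/dma-fence.h>
#include <drm/drm_pagemap.h>

static int foo_copy_to_devmem(struct page **pages,
			      struct drm_pagemap_addr *pagemap_addr,
			      unsigned long npages,
			      struct dma_fence *pre_migrate_fence)
{
	/* Either pipeline the copy behind the fence (as xe does via its
	 * migration job scheduler) or, for a synchronous copy engine,
	 * wait here before touching the destination memory. */
	if (pre_migrate_fence)
		dma_fence_wait(pre_migrate_fence, false);

	/* ... issue and wait for the actual copy here ... */
	return 0;
}

static const struct drm_pagemap_devmem_ops foo_devmem_ops = {
	.copy_to_devmem = foo_copy_to_devmem,
	/* .copy_to_ram, .devmem_release, ... elided */
};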