author		Linus Torvalds <torvalds@linux-foundation.org>	2019-07-15 19:04:27 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-07-15 19:04:27 -0700
commit		be8454afc50f43016ca8b6130d9673bdd0bd56ec
tree		897e49c1ccadeed9b083a3ffc13f0dd2d6d7d874 /drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
parent		fec88ab0af9706b2201e5daf377c5031c62d11f7
parent		3729fe2bc2a01f4cc1aa88be8f64af06084c87d6
Merge tag 'drm-next-2019-07-16' of git://anongit.freedesktop.org/drm/drm
Pull drm updates from Dave Airlie:
"The biggest thing in this is the AMD Navi GPU support, this again
contains a bunch of header files that are large. These are the new AMD
RX5700 GPUs that just recently became available.
New drivers:
- ST-Ericsson MCDE driver
- Ingenic JZ47xx SoC
UAPI change:
- HDR source metadata property
Core:
- HDR infoframes and EDID parsing
- drm hdmi infoframe unpacking
- prime sg_table caching moved into dma-buf
- New gem vram helpers to reduce driver code
- Lots of drmP.h removal
- reservation fencing fix
- documentation updates
- drm_fb_helper_connector removed
- mode name command handler rewrite
fbcon:
- Remove the fbcon notifiers
ttm:
- forward progress fixes
dma-buf:
- make mmap call optional
- debugfs refcount fixes
- dma-fence free with pending signals fix
- each dma-buf gets an inode
Panels:
- Lots of additional panel bindings
amdgpu:
- initial navi10 support
- avoid hw reset
- HDR metadata support
- new thermal sensors for vega asics
- RAS fixes
- use HMM rather than MMU notifier
- xgmi topology via kfd
- SR-IOV fixes
- driver reload fixes
- DC use a core bpc attribute
- Aux fixes for DC
- Bandwidth calc updates for DC
- Clock handling refactor
- kfd VEGAM support
vmwgfx:
- Coherent memory support changes
i915:
- HDR Support
- HDMI i2c link
- Icelake multi-segmented gamma support
- GuC firmware update
- Mule Creek Canyon PCH support for EHL
- EHL platform updates
- move i915.alpha_support to i915.force_probe
- runtime PM refactoring
- VBT parsing refactoring
- DSI fixes
- struct mutex dependency reduction
- GEM code reorg
mali-dp:
- Komeda driver features
msm:
- dsi vs EPROBE_DEFER fixes
- msm8998 snapdragon 835 support
- a540 gpu support
- mdp5 and dpu interconnect support
exynos:
- drmP.h removal
tegra:
- misc fixes
tda998x:
- audio support improvements
- pixel repeated mode support
- quantisation range handling corrections
- HDMI vendor info fix
armada:
- interlace support fix
- overlay/video plane register handling refactor
- add gamma support
rockchip:
- RK3328 support
panfrost:
- expose perf counters via hidden ioctls
vkms:
- enumerate CRC sources list
ast:
- rework BO handling
mgag200:
- rework BO handling
dw-hdmi:
- suspend/resume support
rcar-du:
- R8A774A1 SoC support
- LVDS dual-link mode support
- Additional formats
- Misc fixes
omapdrm:
- DSI command mode display support
stm:
- fb modifier support
- runtime PM support
sun4i:
- use vmap ops
vc4:
- binner bo binding rework
v3d:
- compute shader support
- resync/sync fixes
- job management refactoring
lima:
- NULL pointer in irq handler fix
- scheduler default timeout
virtio:
- fence seqno support
- trace events
bochs:
- misc fixes
tc358767:
- IRQ/HPD handling
sii902x:
- HDMI audio support
atmel-hlcdc:
- misc fixes
meson:
- zpos support"
* tag 'drm-next-2019-07-16' of git://anongit.freedesktop.org/drm/drm: (1815 commits)
Revert "Merge branch 'vmwgfx-next' of git://people.freedesktop.org/~thomash/linux into drm-next"
Revert "mm: adjust apply_to_pfn_range interface for dropped token."
mm: adjust apply_to_pfn_range interface for dropped token.
drm/amdgpu/navi10: add uclk activity sensor
drm/amdgpu: properly guard the generic discovery code
drm/amdgpu: add missing documentation on new module parameters
drm/amdgpu: don't invalidate caches in RELEASE_MEM, only do the writeback
drm/amd/display: avoid 64-bit division
drm/amdgpu/psp11: simplify the ucode register logic
drm/amdgpu: properly guard DC support in navi code
drm/amd/powerplay: vega20: fix uninitialized variable use
drm/amd/display: dcn20: include linux/delay.h
amdgpu: make pmu support optional
drm/amd/powerplay: Zero initialize current_rpm in vega20_get_fan_speed_percent
drm/amd/powerplay: Zero initialize freq in smu_v11_0_get_current_clk_freq
drm/amd/powerplay: Use memset to initialize metrics structs
drm/amdgpu/mes10.1: Fix header guard
drm/amd/powerplay: add temperature sensor support for navi10
drm/amdgpu: fix scheduler timeout calc
drm/amdgpu: Prepare for hmm_range_register API change (v2)
...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c	211
1 file changed, 84 insertions(+), 127 deletions(-)
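The diff below converts amdgpu's userptr tracking from raw mmu_notifier callbacks to the hmm_mirror interface that kernels of this era (roughly v5.1-v5.3) exported from <linux/hmm.h>: the mirror is embedded in struct amdgpu_mn, and the paired invalidate_range_start/end callbacks collapse into a single sync_cpu_device_pagetables hook. As a reading aid, here is a minimal sketch of that registration pattern, assuming the v5.2-era HMM mirror API (the my_* names are illustrative, not from the kernel tree):

#include <linux/hmm.h>

struct my_mirror {
	struct hmm_mirror mirror;	/* embedded so container_of() works */
	struct mm_struct *mm;
};

/* The CPU is invalidating [update->start, update->end); stop device
 * access to that range before returning. */
static int my_sync_cpu_device_pagetables(struct hmm_mirror *mirror,
					 const struct hmm_update *update)
{
	struct my_mirror *m = container_of(mirror, struct my_mirror, mirror);

	/* May only return -EAGAIN when update->blockable is false and
	 * making progress would require sleeping. */
	(void)m;
	return 0;
}

/* The process address space is going away; tear down lazily. */
static void my_release(struct hmm_mirror *mirror)
{
}

static const struct hmm_mirror_ops my_mirror_ops = {
	.sync_cpu_device_pagetables = my_sync_cpu_device_pagetables,
	.release = my_release,
};

static int my_mirror_create(struct my_mirror *m, struct mm_struct *mm)
{
	/* amdgpu_mn_get() in this tree holds mm->mmap_sem for write
	 * around this registration. */
	m->mm = mm;
	m->mirror.ops = &my_mirror_ops;
	return hmm_mirror_register(&m->mirror, mm);
}

Unlike invalidate_range_start/end, sync_cpu_device_pagetables must leave the device fully synchronized before it returns; that is why the diff deletes amdgpu_mn_invalidate_range_end() and instead drops the read lock inside the sync callbacks themselves.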
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index 58ed401c5996..3971c201f320 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -45,51 +45,12 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
-#include <linux/mmu_notifier.h>
-#include <linux/interval_tree.h>
-#include <drm/drmP.h>
 #include <drm/drm.h>
 
 #include "amdgpu.h"
 #include "amdgpu_amdkfd.h"
 
 /**
- * struct amdgpu_mn
- *
- * @adev: amdgpu device pointer
- * @mm: process address space
- * @mn: MMU notifier structure
- * @type: type of MMU notifier
- * @work: destruction work item
- * @node: hash table node to find structure by adev and mn
- * @lock: rw semaphore protecting the notifier nodes
- * @objects: interval tree containing amdgpu_mn_nodes
- * @read_lock: mutex for recursive locking of @lock
- * @recursion: depth of recursion
- *
- * Data for each amdgpu device and process address space.
- */
-struct amdgpu_mn {
-	/* constant after initialisation */
-	struct amdgpu_device	*adev;
-	struct mm_struct	*mm;
-	struct mmu_notifier	mn;
-	enum amdgpu_mn_type	type;
-
-	/* only used on destruction */
-	struct work_struct	work;
-
-	/* protected by adev->mn_lock */
-	struct hlist_node	node;
-
-	/* objects protected by lock */
-	struct rw_semaphore	lock;
-	struct rb_root_cached	objects;
-	struct mutex		read_lock;
-	atomic_t		recursion;
-};
-
-/**
  * struct amdgpu_mn_node
  *
  * @it: interval node defining start-last of the affected address range
@@ -103,7 +64,7 @@ struct amdgpu_mn_node {
 };
 
 /**
- * amdgpu_mn_destroy - destroy the MMU notifier
+ * amdgpu_mn_destroy - destroy the HMM mirror
  *
  * @work: previously sheduled work item
  *
@@ -129,28 +90,26 @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	}
 	up_write(&amn->lock);
 	mutex_unlock(&adev->mn_lock);
-	mmu_notifier_unregister_no_release(&amn->mn, amn->mm);
+
+	hmm_mirror_unregister(&amn->mirror);
 	kfree(amn);
 }
 
 /**
- * amdgpu_mn_release - callback to notify about mm destruction
+ * amdgpu_hmm_mirror_release - callback to notify about mm destruction
  *
- * @mn: our notifier
- * @mm: the mm this callback is about
+ * @mirror: the HMM mirror (mm) this callback is about
  *
- * Shedule a work item to lazy destroy our notifier.
+ * Shedule a work item to lazy destroy HMM mirror.
  */
-static void amdgpu_mn_release(struct mmu_notifier *mn,
-			      struct mm_struct *mm)
+static void amdgpu_hmm_mirror_release(struct hmm_mirror *mirror)
 {
-	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
 
 	INIT_WORK(&amn->work, amdgpu_mn_destroy);
 	schedule_work(&amn->work);
 }
 
-
 /**
  * amdgpu_mn_lock - take the write side lock for this notifier
  *
@@ -181,14 +140,10 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
 static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
 {
 	if (blockable)
-		mutex_lock(&amn->read_lock);
-	else if (!mutex_trylock(&amn->read_lock))
+		down_read(&amn->lock);
+	else if (!down_read_trylock(&amn->lock))
 		return -EAGAIN;
 
-	if (atomic_inc_return(&amn->recursion) == 1)
-		down_read_non_owner(&amn->lock);
-	mutex_unlock(&amn->read_lock);
-
 	return 0;
 }
 
@@ -199,8 +154,7 @@ static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
  */
 static void amdgpu_mn_read_unlock(struct amdgpu_mn *amn)
 {
-	if (atomic_dec_return(&amn->recursion) == 0)
-		up_read_non_owner(&amn->lock);
+	up_read(&amn->lock);
 }
 
 /**
@@ -229,149 +183,132 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 					true, false, MAX_SCHEDULE_TIMEOUT);
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
-
-		amdgpu_ttm_tt_mark_user_pages(bo->tbo.ttm);
 	}
 }
 
 /**
- * amdgpu_mn_invalidate_range_start_gfx - callback to notify about mm change
+ * amdgpu_mn_sync_pagetables_gfx - callback to notify about mm change
  *
- * @mn: our notifier
- * @range: mmu notifier context
+ * @mirror: the hmm_mirror (mm) is about to update
+ * @update: the update start, end address
  *
  * Block for operations on BOs to finish and mark pages as accessed and
  * potentially dirty.
  */
-static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
-			const struct mmu_notifier_range *range)
+static int amdgpu_mn_sync_pagetables_gfx(struct hmm_mirror *mirror,
+			const struct hmm_update *update)
 {
-	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
+	unsigned long start = update->start;
+	unsigned long end = update->end;
+	bool blockable = update->blockable;
 	struct interval_tree_node *it;
-	unsigned long end;
 
 	/* notification is exclusive, but interval is inclusive */
-	end = range->end - 1;
+	end -= 1;
 
 	/* TODO we should be able to split locking for interval tree and
 	 * amdgpu_mn_invalidate_node
 	 */
-	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
+	if (amdgpu_mn_read_lock(amn, blockable))
 		return -EAGAIN;
 
-	it = interval_tree_iter_first(&amn->objects, range->start, end);
+	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 
-		if (!mmu_notifier_range_blockable(range)) {
+		if (!blockable) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
 
 		node = container_of(it, struct amdgpu_mn_node, it);
-		it = interval_tree_iter_next(it, range->start, end);
+		it = interval_tree_iter_next(it, start, end);
 
-		amdgpu_mn_invalidate_node(node, range->start, end);
+		amdgpu_mn_invalidate_node(node, start, end);
 	}
 
+	amdgpu_mn_read_unlock(amn);
+
 	return 0;
 }
 
 /**
- * amdgpu_mn_invalidate_range_start_hsa - callback to notify about mm change
+ * amdgpu_mn_sync_pagetables_hsa - callback to notify about mm change
  *
- * @mn: our notifier
- * @mm: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
+ * @mirror: the hmm_mirror (mm) is about to update
+ * @update: the update start, end address
  *
  * We temporarily evict all BOs between start and end. This
 

 * necessitates evicting all user-mode queues of the process. The BOs
 * are restorted in amdgpu_mn_invalidate_range_end_hsa.
 */
-static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
-			const struct mmu_notifier_range *range)
+static int amdgpu_mn_sync_pagetables_hsa(struct hmm_mirror *mirror,
+			const struct hmm_update *update)
 {
-	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
+	struct amdgpu_mn *amn = container_of(mirror, struct amdgpu_mn, mirror);
+	unsigned long start = update->start;
+	unsigned long end = update->end;
+	bool blockable = update->blockable;
 	struct interval_tree_node *it;
-	unsigned long end;
 
 	/* notification is exclusive, but interval is inclusive */
-	end = range->end - 1;
+	end -= 1;
 
-	if (amdgpu_mn_read_lock(amn, mmu_notifier_range_blockable(range)))
+	if (amdgpu_mn_read_lock(amn, blockable))
 		return -EAGAIN;
 
-	it = interval_tree_iter_first(&amn->objects, range->start, end);
+	it = interval_tree_iter_first(&amn->objects, start, end);
 	while (it) {
 		struct amdgpu_mn_node *node;
 		struct amdgpu_bo *bo;
 
-		if (!mmu_notifier_range_blockable(range)) {
+		if (!blockable) {
 			amdgpu_mn_read_unlock(amn);
 			return -EAGAIN;
 		}
 
 		node = container_of(it, struct amdgpu_mn_node, it);
-		it = interval_tree_iter_next(it, range->start, end);
+		it = interval_tree_iter_next(it, start, end);
 
 		list_for_each_entry(bo, &node->bos, mn_list) {
 			struct kgd_mem *mem = bo->kfd_bo;
 
 			if (amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm,
-							 range->start,
-							 end))
-				amdgpu_amdkfd_evict_userptr(mem, range->mm);
+							 start, end))
+				amdgpu_amdkfd_evict_userptr(mem, amn->mm);
 		}
 	}
 
+	amdgpu_mn_read_unlock(amn);
+
 	return 0;
 }
 
-/**
- * amdgpu_mn_invalidate_range_end - callback to notify about mm change
- *
- * @mn: our notifier
- * @mm: the mm this callback is about
- * @start: start of updated range
- * @end: end of updated range
- *
- * Release the lock again to allow new command submissions.
+/* Low bits of any reasonable mm pointer will be unused due to struct
+ * alignment. Use these bits to make a unique key from the mm pointer
+ * and notifier type.
  */
-static void amdgpu_mn_invalidate_range_end(struct mmu_notifier *mn,
-			const struct mmu_notifier_range *range)
-{
-	struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
-
-	amdgpu_mn_read_unlock(amn);
-}
+#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
 
-static const struct mmu_notifier_ops amdgpu_mn_ops[] = {
+static struct hmm_mirror_ops amdgpu_hmm_mirror_ops[] = {
 	[AMDGPU_MN_TYPE_GFX] = {
-		.release = amdgpu_mn_release,
-		.invalidate_range_start = amdgpu_mn_invalidate_range_start_gfx,
-		.invalidate_range_end = amdgpu_mn_invalidate_range_end,
+		.sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_gfx,
+		.release = amdgpu_hmm_mirror_release
 	},
 	[AMDGPU_MN_TYPE_HSA] = {
-		.release = amdgpu_mn_release,
-		.invalidate_range_start = amdgpu_mn_invalidate_range_start_hsa,
-		.invalidate_range_end = amdgpu_mn_invalidate_range_end,
+		.sync_cpu_device_pagetables = amdgpu_mn_sync_pagetables_hsa,
+		.release = amdgpu_hmm_mirror_release
 	},
 };
 
-/* Low bits of any reasonable mm pointer will be unused due to struct
- * alignment. Use these bits to make a unique key from the mm pointer
- * and notifier type.
- */
-#define AMDGPU_MN_KEY(mm, type) ((unsigned long)(mm) + (type))
-
 /**
- * amdgpu_mn_get - create notifier context
+ * amdgpu_mn_get - create HMM mirror context
  *
  * @adev: amdgpu device pointer
  * @type: type of MMU notifier context
  *
- * Creates a notifier context for current->mm.
+ * Creates a HMM mirror context for current->mm.
 */
 struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 				enum amdgpu_mn_type type)
@@ -401,12 +338,10 @@ struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev,
 	amn->mm = mm;
 	init_rwsem(&amn->lock);
 	amn->type = type;
-	amn->mn.ops = &amdgpu_mn_ops[type];
 	amn->objects = RB_ROOT_CACHED;
-	mutex_init(&amn->read_lock);
-	atomic_set(&amn->recursion, 0);
 
-	r = __mmu_notifier_register(&amn->mn, mm);
+	amn->mirror.ops = &amdgpu_hmm_mirror_ops[type];
+	r = hmm_mirror_register(&amn->mirror, mm);
 	if (r)
 		goto free_amn;
 
@@ -432,7 +367,7 @@ free_amn:
  * @bo: amdgpu buffer object
  * @addr: userptr addr we should monitor
  *
- * Registers an MMU notifier for the given BO at the specified address.
+ * Registers an HMM mirror for the given BO at the specified address.
  * Returns 0 on success, -ERRNO if anything goes wrong.
 */
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
@@ -488,11 +423,11 @@ int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
 }
 
 /**
- * amdgpu_mn_unregister - unregister a BO for notifier updates
+ * amdgpu_mn_unregister - unregister a BO for HMM mirror updates
  *
  * @bo: amdgpu buffer object
  *
- * Remove any registration of MMU notifier updates from the buffer object.
+ * Remove any registration of HMM mirror updates from the buffer object.
 */
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
@@ -528,3 +463,25 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 	mutex_unlock(&adev->mn_lock);
 }
 
+/* flags used by HMM internal, not related to CPU/GPU PTE flags */
+static const uint64_t hmm_range_flags[HMM_PFN_FLAG_MAX] = {
+	(1 << 0), /* HMM_PFN_VALID */
+	(1 << 1), /* HMM_PFN_WRITE */
+	0 /* HMM_PFN_DEVICE_PRIVATE */
+};
+
+static const uint64_t hmm_range_values[HMM_PFN_VALUE_MAX] = {
+	0xfffffffffffffffeUL, /* HMM_PFN_ERROR */
+	0, /* HMM_PFN_NONE */
+	0xfffffffffffffffcUL /* HMM_PFN_SPECIAL */
+};
+
+void amdgpu_hmm_init_range(struct hmm_range *range)
+{
+	if (range) {
+		range->flags = hmm_range_flags;
+		range->values = hmm_range_values;
+		range->pfn_shift = PAGE_SHIFT;
+		INIT_LIST_HEAD(&range->list);
+	}
+}
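One detail worth a note: the relocated AMDGPU_MN_KEY macro hashes an (mm, type) pair into a single unsigned long by adding the small enum value to the mm pointer, relying on struct mm_struct being aligned far more coarsely than the number of notifier types, so the sums cannot collide. Elsewhere in amdgpu_mn.c (unchanged code not shown in this hunk) the key is used both to pick the hash bucket and as the equality test when looking up an existing context, which is why that uniqueness argument matters. A stand-alone sketch of the same trick, compilable in userspace (all names here are hypothetical, not kernel code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum mn_type { MN_TYPE_GFX, MN_TYPE_HSA, MN_TYPE_MAX };

/* Stand-in for struct mm_struct: anything heap-allocated is at least
 * pointer-aligned, so the low bits of its address are zero. */
struct fake_mm { void *dummy[4]; };

static uintptr_t mn_key(const struct fake_mm *mm, enum mn_type type)
{
	/* The low log2(alignment) bits of mm are free, so adding the
	 * small type value yields a distinct key per (mm, type). */
	return (uintptr_t)mm + type;
}

int main(void)
{
	struct fake_mm *mm = malloc(sizeof(*mm));

	/* malloc alignment comfortably exceeds MN_TYPE_MAX. */
	assert(mm && ((uintptr_t)mm % MN_TYPE_MAX) == 0);
	printf("gfx key %#lx, hsa key %#lx\n",
	       (unsigned long)mn_key(mm, MN_TYPE_GFX),
	       (unsigned long)mn_key(mm, MN_TYPE_HSA));
	assert(mn_key(mm, MN_TYPE_GFX) != mn_key(mm, MN_TYPE_HSA));
	free(mm);
	return 0;
}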