author     Daniel Vetter <daniel.vetter@ffwll.ch>   2020-12-15 10:21:47 +0100
committer  Daniel Vetter <daniel.vetter@ffwll.ch>   2020-12-15 10:21:48 +0100
commit     5fbd41d3bf123af6a135bdea564087ec0f563eb0
tree       74c811f57266ad4f75edff9cfe71b57f78b9a2a8
parent     1d36dffa5d887715dacca0f717f4519b7be5e498
parent     05faf1559de52465f1e753e31883aa294e6179c1
Merge tag 'drm-misc-next-2020-11-27-1' of git://anongit.freedesktop.org/drm/drm-misc into drm-next
drm-misc-next for 5.11:
UAPI Changes:
Cross-subsystem Changes:
* char/agp: Disable frontend without CONFIG_DRM_LEGACY
* mm: Fix fput in mmap error path; Introduce vma_set_file() to change
  vma->vm_file (a usage sketch follows this list)
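For context, a minimal sketch of how an exporter's mmap path can use the new
vma_set_file() helper instead of open-coding the get_file()/fput() dance, as
the dma_buf_mmap() hunk further below does. The "my_buffer" structure and
my_exporter_mmap() are hypothetical names, not part of this series;
vma_set_file() itself takes a reference on the new file and drops the old one.

/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/fs.h>
#include <linux/mm.h>

struct my_buffer {
	struct file *file;	/* backing file of the buffer (hypothetical) */
	int (*mmap)(struct my_buffer *buf, struct vm_area_struct *vma);
};

static int my_exporter_mmap(struct my_buffer *buf, struct vm_area_struct *vma,
			    unsigned long pgoff)
{
	/* Swap vma->vm_file to the buffer's file; refcounting is handled. */
	vma_set_file(vma, buf->file);
	vma->vm_pgoff = pgoff;

	return buf->mmap(buf, vma);
}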
Core Changes:
* dma-buf: Use sgtables in system heap; Move heap helpers to CMA-heap code;
Skip sync for unmapped buffers; Alloc higher-order pages if available;
Respect num_fences when initializing shared fence list
* doc: Improvements around DRM modes and SCALING_FILTER
* Pass full state to connector atomic functions + callee updates (see the
  sketch after this list)
* Cleanups
* shmem: Map pages with caching by default; Cleanups
* ttm: Fix DMA32 for global page pool
* fbdev: Cleanups
* fb-helper: Update framebuffer after userspace writes; Unmap console buffer
during shutdown; Rework damage handling of shadow framebuffer
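Regarding "Pass full state to connector atomic functions": a hedged sketch of
a connector ->atomic_check() written against the reworked signature that
receives the full struct drm_atomic_state and looks up its own new connector
state. my_connector_atomic_check() and the trivial check inside it are
illustrative only; the drivers actually converted are listed in the diff.

// SPDX-License-Identifier: GPL-2.0
#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_modeset_helper_vtables.h>

static int my_connector_atomic_check(struct drm_connector *connector,
				     struct drm_atomic_state *state)
{
	struct drm_connector_state *new_state =
		drm_atomic_get_new_connector_state(state, connector);

	/* Nothing to validate if the connector is not part of this commit. */
	if (!new_state || !new_state->crtc)
		return 0;

	/* Driver-specific validation of the new state would go here. */
	return 0;
}

static const struct drm_connector_helper_funcs my_connector_helper_funcs = {
	.atomic_check = my_connector_atomic_check,
};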
Driver Changes:
* amdgpu: Multi-hop fixes; Cleanups
* imx: Fix rotation for Vivante tiled formats; Support nearest-neighbour
  scaling; Cleanups
* mcde: Fix RGB formats; Support DPI output; Cleanups
* meson: HDMI clock fixes
* panel: Add driver and bindings for Innolux N125HCE-GN1
* panel/s6e63m0: More backlight levels; Fix init; Cleanups
* via: Cleanups
* virtio: Use fence ID for handling fences; Cleanups
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
From: Thomas Zimmermann <tzimmermann@suse.de>
Link: https://patchwork.freedesktop.org/patch/msgid/20201127083055.GA29139@linux-uq9g
70 files changed, 2131 insertions, 902 deletions
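Most of the diff below reworks the DMA-BUF CMA heap to implement dma_buf_ops
directly and drops the shared heap-helpers layer. For orientation, here is a
hedged userspace sketch of allocating a buffer from a DMA-BUF heap through the
existing /dev/dma_heap interface; the heap node name passed in is platform
dependent and purely an example, while the ioctl and structure come from the
<linux/dma-heap.h> UAPI.

/* SPDX-License-Identifier: GPL-2.0 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/dma-heap.h>

/* Returns a dma-buf fd on success, -1 on error. */
int alloc_from_heap(const char *heap_path, size_t len)
{
	struct dma_heap_allocation_data data = {
		.len = len,
		.fd_flags = O_RDWR | O_CLOEXEC,
	};
	int heap_fd, ret;

	/* e.g. heap_path = "/dev/dma_heap/reserved" (platform dependent) */
	heap_fd = open(heap_path, O_RDWR | O_CLOEXEC);
	if (heap_fd < 0)
		return -1;

	ret = ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data);
	close(heap_fd);
	if (ret < 0)
		return -1;

	return data.fd;	/* ready to mmap() or pass to a driver */
}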
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index f9750b0b6708..27fffafe5b5c 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -159,6 +159,8 @@ properties:
       - innolux,g121x1-l03
         # Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
       - innolux,n116bge
+        # InnoLux 13.3" FHD (1920x1080) eDP TFT LCD panel
+      - innolux,n125hce-gn1
         # InnoLux 15.6" WXGA TFT LCD panel
       - innolux,n156bge-l21
         # Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index cb2497d157f6..90ed8c789e48 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -1,7 +1,11 @@
 # SPDX-License-Identifier: GPL-2.0
-agpgart-y := backend.o frontend.o generic.o isoch.o
+agpgart-y := backend.o generic.o isoch.o
 
+ifeq ($(CONFIG_DRM_LEGACY),y)
 agpgart-$(CONFIG_COMPAT)	+= compat_ioctl.o
+agpgart-y += frontend.o
+endif
+
 
 obj-$(CONFIG_AGP)		+= agpgart.o
 obj-$(CONFIG_AGP_ALI)		+= ali-agp.o
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index 4eb1c772ded7..bb09d64cd51e 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -186,8 +186,13 @@ int agp_add_bridge(struct agp_bridge_data *bridge);
 void agp_remove_bridge(struct agp_bridge_data *bridge);
 
 /* Frontend routines. */
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
 int agp_frontend_initialize(void);
 void agp_frontend_cleanup(void);
+#else
+static inline int agp_frontend_initialize(void) { return 0; }
+static inline void agp_frontend_cleanup(void) {}
+#endif
 
 /* Generic routines. */
 void agp_generic_enable(struct agp_bridge_data *bridge, u32 mode);
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 0eb80c1ecdab..e63684d4cd90 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1166,9 +1166,6 @@ EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		 unsigned long pgoff)
 {
-	struct file *oldfile;
-	int ret;
-
 	if (WARN_ON(!dmabuf || !vma))
 		return -EINVAL;
 
@@ -1186,22 +1183,10 @@ int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
 		return -EINVAL;
 
 	/* readjust the vma */
-	get_file(dmabuf->file);
-	oldfile = vma->vm_file;
-	vma->vm_file = dmabuf->file;
+	vma_set_file(vma, dmabuf->file);
 	vma->vm_pgoff = pgoff;
 
-	ret = dmabuf->ops->mmap(dmabuf, vma);
-	if (ret) {
-		/* restore old parameters on failure */
-		vma->vm_file = oldfile;
-		fput(dmabuf->file);
-	} else {
-		if (oldfile)
-			fput(oldfile);
-	}
-	return ret;
-
+	return dmabuf->ops->mmap(dmabuf, vma);
 }
 EXPORT_SYMBOL_GPL(dma_buf_mmap);
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
index bb5a42b10c29..6ddbeb5dfbf6 100644
--- a/drivers/dma-buf/dma-resv.c
+++ b/drivers/dma-buf/dma-resv.c
@@ -200,7 +200,7 @@ int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 		max = max(old->shared_count + num_fences,
 			  old->shared_max * 2);
 	} else {
-		max = 4;
+		max = max(4ul, roundup_pow_of_two(num_fences));
 	}
 
 	new = dma_resv_list_alloc(max);
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
index 6e54cdec3da0..974467791032 100644
--- a/drivers/dma-buf/heaps/Makefile
+++ b/drivers/dma-buf/heaps/Makefile
@@ -1,4 +1,3 @@
 # SPDX-License-Identifier: GPL-2.0
-obj-y += heap-helpers.o
 obj-$(CONFIG_DMABUF_HEAPS_SYSTEM)	+= system_heap.o
 obj-$(CONFIG_DMABUF_HEAPS_CMA)		+= cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index e55384dc115b..5e7c3436310c 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -2,76 +2,305 @@
 /*
  * DMABUF CMA heap exporter
  *
- * Copyright (C) 2012, 2019 Linaro Ltd.
+ * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
  * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * Also utilizing parts of Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ *	Andrew F. Davis <afd@ti.com>
  */
-
 #include <linux/cma.h>
-#include <linux/device.h>
 #include <linux/dma-buf.h>
 #include <linux/dma-heap.h>
 #include <linux/dma-map-ops.h>
 #include <linux/err.h>
-#include <linux/errno.h>
 #include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/slab.h>
 #include <linux/scatterlist.h>
-#include <linux/sched/signal.h>
+#include <linux/slab.h>
 
-#include "heap-helpers.h"
 
 struct cma_heap {
 	struct dma_heap *heap;
 	struct cma *cma;
 };
 
-static void cma_heap_free(struct heap_helper_buffer *buffer)
+struct cma_heap_buffer {
+	struct cma_heap *heap;
+	struct list_head attachments;
+	struct mutex lock;
+	unsigned long len;
+	struct page *cma_pages;
+	struct page **pages;
+	pgoff_t pagecount;
+	int vmap_cnt;
+	void *vaddr;
+};
+
+struct dma_heap_attachment {
+	struct device *dev;
+	struct sg_table table;
+	struct list_head list;
+	bool mapped;
+};
+
+static int cma_heap_attach(struct dma_buf *dmabuf,
+			   struct dma_buf_attachment *attachment)
 {
-	struct cma_heap *cma_heap = dma_heap_get_drvdata(buffer->heap);
-	unsigned long nr_pages = buffer->pagecount;
-	struct page *cma_pages = buffer->priv_virt;
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+	int ret;
 
-	/* free page list */
-	kfree(buffer->pages);
-	/* release memory */
-	cma_release(cma_heap->cma, cma_pages, nr_pages);
+	a = kzalloc(sizeof(*a), GFP_KERNEL);
+	if (!a)
+		return -ENOMEM;
+
+	ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+					buffer->pagecount, 0,
+					buffer->pagecount << PAGE_SHIFT,
+					GFP_KERNEL);
+	if (ret) {
+		kfree(a);
+		return ret;
+	}
+
+	a->dev = attachment->dev;
+	INIT_LIST_HEAD(&a->list);
+	a->mapped = false;
+
+	attachment->priv = a;
+
+	mutex_lock(&buffer->lock);
+	list_add(&a->list, &buffer->attachments);
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static void cma_heap_detach(struct dma_buf *dmabuf,
+			    struct dma_buf_attachment *attachment)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a = attachment->priv;
+
+	mutex_lock(&buffer->lock);
+	list_del(&a->list);
+	mutex_unlock(&buffer->lock);
+
+	sg_free_table(&a->table);
+	kfree(a);
+}
+
+static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+					     enum dma_data_direction direction)
+{
+	struct dma_heap_attachment *a = attachment->priv;
+	struct sg_table *table = &a->table;
+	int ret;
+
+	ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+	if (ret)
+		return ERR_PTR(-ENOMEM);
+	a->mapped = true;
+	return table;
+}
+
+static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+				   struct sg_table *table,
+				   enum dma_data_direction direction)
+{
+	struct dma_heap_attachment *a = attachment->priv;
+
+	a->mapped = false;
+	dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+					     enum dma_data_direction direction)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+
+	if (buffer->vmap_cnt)
+		invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+	mutex_lock(&buffer->lock);
+	list_for_each_entry(a, &buffer->attachments, list) {
+		if (!a->mapped)
+			continue;
+		dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+					   enum dma_data_direction direction)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	struct dma_heap_attachment *a;
+
+	if (buffer->vmap_cnt)
+		flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+	mutex_lock(&buffer->lock);
+	list_for_each_entry(a, &buffer->attachments, list) {
+		if (!a->mapped)
+			continue;
+		dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+	}
+	mutex_unlock(&buffer->lock);
+
+	return 0;
+}
+
+static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
+{
+	struct vm_area_struct *vma = vmf->vma;
+	struct cma_heap_buffer *buffer = vma->vm_private_data;
+
+	if (vmf->pgoff > buffer->pagecount)
+		return VM_FAULT_SIGBUS;
+
+	vmf->page = buffer->pages[vmf->pgoff];
+	get_page(vmf->page);
+
+	return 0;
+}
+
+static const struct vm_operations_struct dma_heap_vm_ops = {
+	.fault = cma_heap_vm_fault,
+};
+
+static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+
+	if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+		return -EINVAL;
+
+	vma->vm_ops = &dma_heap_vm_ops;
+	vma->vm_private_data = buffer;
+
+	return 0;
+}
+
+static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
+{
+	void *vaddr;
+
+	vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+	if (!vaddr)
+		return ERR_PTR(-ENOMEM);
+
+	return vaddr;
+}
+
+static int cma_heap_vmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	void *vaddr;
+	int ret = 0;
+
+	mutex_lock(&buffer->lock);
+	if (buffer->vmap_cnt) {
+		buffer->vmap_cnt++;
+		dma_buf_map_set_vaddr(map, buffer->vaddr);
+		goto out;
+	}
+
+	vaddr = cma_heap_do_vmap(buffer);
+	if (IS_ERR(vaddr)) {
+		ret = PTR_ERR(vaddr);
+		goto out;
+	}
+	buffer->vaddr = vaddr;
+	buffer->vmap_cnt++;
+	dma_buf_map_set_vaddr(map, buffer->vaddr);
+out:
+	mutex_unlock(&buffer->lock);
+
+	return ret;
+}
+
+static void cma_heap_vunmap(struct dma_buf *dmabuf, struct dma_buf_map *map)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+
+	mutex_lock(&buffer->lock);
+	if (!--buffer->vmap_cnt) {
+		vunmap(buffer->vaddr);
+		buffer->vaddr = NULL;
+	}
+	mutex_unlock(&buffer->lock);
+	dma_buf_map_clear(map);
+}
+
+static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+	struct cma_heap_buffer *buffer = dmabuf->priv;
+	struct cma_heap *cma_heap = buffer->heap;
+
+	if (buffer->vmap_cnt > 0) {
+		WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
+		vunmap(buffer->vaddr);
+		buffer->vaddr = NULL;
+	}
+
+	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
 	kfree(buffer);
 }
 
-/* dmabuf heap CMA operations functions */
+static const struct dma_buf_ops cma_heap_buf_ops = {
+	.attach = cma_heap_attach,
+	.detach = cma_heap_detach,
+	.map_dma_buf = cma_heap_map_dma_buf,
+	.unmap_dma_buf = cma_heap_unmap_dma_buf,
+	.begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
+	.end_cpu_access = cma_heap_dma_buf_end_cpu_access,
+	.mmap = cma_heap_mmap,
+	.vmap = cma_heap_vmap,
+	.vunmap = cma_heap_vunmap,
+	.release = cma_heap_dma_buf_release,
+};
+
 static int cma_heap_allocate(struct dma_heap *heap,
-				unsigned long len,
-				unsigned long fd_flags,
-				unsigned long heap_flags)
+			     unsigned long len,
+			     unsigned long fd_flags,
+			     unsigned long heap_flags)
 {
 	struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
-	struct heap_helper_buffer *helper_buffer;
-	struct page *cma_pages;
+	struct cma_heap_buffer *buffer;
+	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 	size_t size = PAGE_ALIGN(len);
-	unsigned long nr_pages = size >> PAGE_SHIFT;
+	pgoff_t pagecount = size >> PAGE_SHIFT;
 	unsigned long align = get_order(size);
+	struct page *cma_pages;
 	struct dma_buf *dmabuf;
 	int ret = -ENOMEM;
 	pgoff_t pg;
 
-	if (align > CONFIG_CMA_ALIGNMENT)
-		align = CONFIG_CMA_ALIGNMENT;
-
-	helper_buffer = kzalloc(sizeof(*helper_buffer), GFP_KERNEL);
-	if (!helper_buffer)
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer)
 		return -ENOMEM;
 
-	init_heap_helper_buffer(helper_buffer, cma_heap_free);
-	helper_buffer->heap = heap;
-	helper_buffer->size = len;
+	INIT_LIST_HEAD(&buffer->attachments);
+	mutex_init(&buffer->lock);
+	buffer->len = size;
+
+	if (align > CONFIG_CMA_ALIGNMENT)
+		align = CONFIG_CMA_ALIGNMENT;
 
-	cma_pages = cma_alloc(cma_heap->cma, nr_pages, align, false);
+	cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
 	if (!cma_pages)
-		goto free_buf;
+		goto free_buffer;
 
+	/* Clear the cma pages */
 	if (PageHighMem(cma_pages)) {
-		unsigned long nr_clear_pages = nr_pages;
+		unsigned long nr_