| author | Dave Airlie <airlied@redhat.com> | 2025-06-18 08:09:27 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2025-06-18 08:09:35 +1000 |
| commit | 45215c589e7f22641e2fc6f518bcbead71d90f9c | |
| tree | 1faca88e4993230babcf5b95d590d3e5e0b14f2b | /drivers/dma-buf |
| parent | e04c78d86a9699d136910cfc0bdcf01087e3267e | |
| parent | c5b4393c5492555e35c08677a326c9c53b275abd | |
Merge tag 'drm-misc-next-2025-06-12' of https://gitlab.freedesktop.org/drm/misc/kernel into drm-next
drm-misc-next for 6.17:
UAPI Changes:
Cross-subsystem Changes:
Core Changes:
- atomic-helpers: Tune the enable / disable sequence
- bridge: Add destroy hook
- color management: Add helpers for hardware gamma LUT handling
- HDMI: Add CEC handling, YUV420 output support
- sched: tracing improvements
Driver Changes:
- hyperv: Move out of simple-kms, drm_panic support
- i915: drm_panel_follower support
- imx: Add i.MX8qxp Display Controller Support
- lima: Add Rockchip RK3528 GPU Support
- nouveau: fence handling cleanup
- panfrost: Add BO labeling, 64-bit registers access
- qaic: Add RAS Support
- rz-du: Add RZ/V2H(P) Support, MIPI-DSI DCS Support
- sun4i: Add H616 Support
- tidss: Add TI AM62L Support
- vkms: YUV and R* formats support
- bridges:
- Switched to reference counted drm_bridge allocations
- panels:
- Switched to reference counted drm_panel allocations
- Add support for fwnode-based panel lookup
- himax-hx8394: Support for Huiling hl055fhv028c
- ilitek-ili9881c: Support for 7" Raspberry Pi 720x1280
- panel-edp: Support for KDC KD116N3730A05, CMN N160JCE-ELL
- panel-simple: Support for AUO P238HAN01
- st7701: Support for Winstar wf40eswaa6mnn0
- visionox-rm69299: Support for rm69299-shift
- New panels: Renesas R61307, Renesas R69328
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Maxime Ripard <mripard@redhat.com>
Link: https://lore.kernel.org/r/20250612-coucal-of-impossible-cleaning-a5eecf@houat
Diffstat (limited to 'drivers/dma-buf')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/dma-buf/Kconfig | 1 |
| -rw-r--r-- | drivers/dma-buf/dma-fence-chain.c | 7 |
| -rw-r--r-- | drivers/dma-buf/dma-fence.c | 66 |
| -rw-r--r-- | drivers/dma-buf/sw_sync.c | 2 |
| -rw-r--r-- | drivers/dma-buf/sync_file.c | 8 |
| -rw-r--r-- | drivers/dma-buf/udmabuf.c | 23 |
6 files changed, 65 insertions, 42 deletions
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index fee04fdb0822..b46eb8a552d7 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -36,7 +36,6 @@ config UDMABUF
 	depends on DMA_SHARED_BUFFER
 	depends on MEMFD_CREATE || COMPILE_TEST
 	depends on MMU
-	select VMAP_PFN
 	help
 	  A driver to let userspace turn memfd regions into dma-bufs.
 	  Qemu can use this to create host dmabufs for guest framebuffers.
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
index 9663ba1bb6ac..a8a90acf4f34 100644
--- a/drivers/dma-buf/dma-fence-chain.c
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -218,7 +218,6 @@ static void dma_fence_chain_set_deadline(struct dma_fence *fence,
 }
 
 const struct dma_fence_ops dma_fence_chain_ops = {
-	.use_64bit_seqno = true,
 	.get_driver_name = dma_fence_chain_get_driver_name,
 	.get_timeline_name = dma_fence_chain_get_timeline_name,
 	.enable_signaling = dma_fence_chain_enable_signaling,
@@ -252,7 +251,7 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
 	chain->prev_seqno = 0;
 
 	/* Try to reuse the context of the previous chain node. */
-	if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+	if (prev_chain && __dma_fence_is_later(prev, seqno, prev->seqno)) {
 		context = prev->context;
 		chain->prev_seqno = prev->seqno;
 	} else {
@@ -262,8 +261,8 @@ void dma_fence_chain_init(struct dma_fence_chain *chain,
 		seqno = max(prev->seqno, seqno);
 	}
 
-	dma_fence_init(&chain->base, &dma_fence_chain_ops,
-		       &chain->lock, context, seqno);
+	dma_fence_init64(&chain->base, &dma_fence_chain_ops, &chain->lock,
+			 context, seqno);
 
 	/*
 	 * Chaining dma_fence_chain container together is only allowed through
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
index f0cdd3e99d36..74f9e4b665e3 100644
--- a/drivers/dma-buf/dma-fence.c
+++ b/drivers/dma-buf/dma-fence.c
@@ -538,8 +538,8 @@ void dma_fence_release(struct kref *kref)
 	if (WARN(!list_empty(&fence->cb_list) &&
 		 !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
 		 "Fence %s:%s:%llx:%llx released with pending signals!\n",
-		 fence->ops->get_driver_name(fence),
-		 fence->ops->get_timeline_name(fence),
+		 dma_fence_driver_name(fence),
+		 dma_fence_timeline_name(fence),
 		 fence->context, fence->seqno)) {
 		unsigned long flags;
 
@@ -983,12 +983,32 @@ EXPORT_SYMBOL(dma_fence_set_deadline);
 void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
 {
 	seq_printf(seq, "%s %s seq %llu %ssignalled\n",
-		   fence->ops->get_driver_name(fence),
-		   fence->ops->get_timeline_name(fence), fence->seqno,
+		   dma_fence_driver_name(fence),
+		   dma_fence_timeline_name(fence),
+		   fence->seqno,
 		   dma_fence_is_signaled(fence) ? "" : "un");
 }
 EXPORT_SYMBOL(dma_fence_describe);
 
+static void
+__dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+		 spinlock_t *lock, u64 context, u64 seqno, unsigned long flags)
+{
+	BUG_ON(!lock);
+	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+
+	kref_init(&fence->refcount);
+	fence->ops = ops;
+	INIT_LIST_HEAD(&fence->cb_list);
+	fence->lock = lock;
+	fence->context = context;
+	fence->seqno = seqno;
+	fence->flags = flags;
+	fence->error = 0;
+
+	trace_dma_fence_init(fence);
+}
+
 /**
  * dma_fence_init - Initialize a custom fence.
  * @fence: the fence to initialize
@@ -1008,18 +1028,30 @@
 void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
 		    spinlock_t *lock, u64 context, u64 seqno)
 {
-	BUG_ON(!lock);
-	BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
-
-	kref_init(&fence->refcount);
-	fence->ops = ops;
-	INIT_LIST_HEAD(&fence->cb_list);
-	fence->lock = lock;
-	fence->context = context;
-	fence->seqno = seqno;
-	fence->flags = 0UL;
-	fence->error = 0;
-
-	trace_dma_fence_init(fence);
+	__dma_fence_init(fence, ops, lock, context, seqno, 0UL);
 }
 EXPORT_SYMBOL(dma_fence_init);
+
+/**
+ * dma_fence_init64 - Initialize a custom fence with 64-bit seqno support.
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
+ *
+ * Initializes an allocated fence, the caller doesn't have to keep its
+ * refcount after committing with this fence, but it will need to hold a
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
+ *
+ * Context and seqno are used for easy comparison between fences, allowing
+ * to check which fence is later by simply using dma_fence_later().
+ */
+void
+dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+		 spinlock_t *lock, u64 context, u64 seqno)
+{
+	__dma_fence_init(fence, ops, lock, context, seqno,
+			 BIT(DMA_FENCE_FLAG_SEQNO64_BIT));
+}
+EXPORT_SYMBOL(dma_fence_init64);
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
index 4f27ee93a00c..3c20f1d31cf5 100644
--- a/drivers/dma-buf/sw_sync.c
+++ b/drivers/dma-buf/sw_sync.c
@@ -170,7 +170,7 @@ static bool timeline_fence_signaled(struct dma_fence *fence)
 {
 	struct sync_timeline *parent = dma_fence_parent(fence);
 
-	return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
+	return !__dma_fence_is_later(fence, fence->seqno, parent->value);
 }
 
 static void timeline_fence_set_deadline(struct dma_fence *fence, ktime_t deadline)
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
index d9b1c1b2a72b..212df4b849fe 100644
--- a/drivers/dma-buf/sync_file.c
+++ b/drivers/dma-buf/sync_file.c
@@ -137,8 +137,8 @@ char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
 		struct dma_fence *fence = sync_file->fence;
 
 		snprintf(buf, len, "%s-%s%llu-%lld",
-			 fence->ops->get_driver_name(fence),
-			 fence->ops->get_timeline_name(fence),
+			 dma_fence_driver_name(fence),
+			 dma_fence_timeline_name(fence),
 			 fence->context, fence->seqno);
 	}
 
@@ -262,9 +262,9 @@ err_put_fd:
 static int sync_fill_fence_info(struct dma_fence *fence,
 				struct sync_fence_info *info)
 {
-	strscpy(info->obj_name, fence->ops->get_timeline_name(fence),
+	strscpy(info->obj_name, dma_fence_timeline_name(fence),
 		sizeof(info->obj_name));
-	strscpy(info->driver_name, fence->ops->get_driver_name(fence),
+	strscpy(info->driver_name, dma_fence_driver_name(fence),
 		sizeof(info->driver_name));
 
 	info->status = dma_fence_get_status(fence);
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index c9d0c68d2fcb..40399c26e6be 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -109,29 +109,22 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
 {
 	struct udmabuf *ubuf = buf->priv;
-	unsigned long *pfns;
+	struct page **pages;
 	void *vaddr;
 	pgoff_t pg;
 
 	dma_resv_assert_held(buf->resv);
 
-	/**
-	 * HVO may free tail pages, so just use pfn to map each folio
-	 * into vmalloc area.
-	 */
-	pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
-	if (!pfns)
+	pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
+	if (!pages)
 		return -ENOMEM;
 
-	for (pg = 0; pg < ubuf->pagecount; pg++) {
-		unsigned long pfn = folio_pfn(ubuf->folios[pg]);
-
-		pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
-		pfns[pg] = pfn;
-	}
+	for (pg = 0; pg < ubuf->pagecount; pg++)
+		pages[pg] = folio_page(ubuf->folios[pg],
+				       ubuf->offsets[pg] >> PAGE_SHIFT);
 
-	vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
-	kvfree(pfns);
+	vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
+	kvfree(pages);
 	if (!vaddr)
 		return -EINVAL;
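The dma-fence-chain hunk above is the first user of the new dma_fence_init64() entry point introduced in dma-fence.c: instead of setting the .use_64bit_seqno initializer that this series drops from dma_fence_chain_ops, a fence opts into 64-bit sequence numbers at init time via DMA_FENCE_FLAG_SEQNO64_BIT. A minimal sketch of how a driver-private fence might use it; the "myfence" names are hypothetical, only dma_fence_init64() and the dma_fence_ops callbacks come from this diff:

```c
#include <linux/dma-fence.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(myfence_lock);

static const char *myfence_driver_name(struct dma_fence *fence)
{
	return "myfence";
}

static const char *myfence_timeline_name(struct dma_fence *fence)
{
	return "myfence-timeline";
}

/* No .use_64bit_seqno here; 64-bit mode is requested at init time instead. */
static const struct dma_fence_ops myfence_ops = {
	.get_driver_name = myfence_driver_name,
	.get_timeline_name = myfence_timeline_name,
};

static struct dma_fence *myfence_create(u64 context, u64 seqno)
{
	struct dma_fence *fence = kzalloc(sizeof(*fence), GFP_KERNEL);

	if (!fence)
		return NULL;

	/*
	 * dma_fence_init64() behaves like dma_fence_init() but pre-sets
	 * DMA_FENCE_FLAG_SEQNO64_BIT, so seqno comparisons treat the full
	 * 64 bits as significant.
	 */
	dma_fence_init64(fence, &myfence_ops, &myfence_lock, context, seqno);
	return fence;
}
```

A driver would typically allocate the context once with dma_fence_context_alloc(1) and hand monotonically increasing seqnos to successive fences on that timeline.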
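The sw_sync and dma-fence-chain hunks also show the reworked __dma_fence_is_later(), which now takes the fence as its first argument so the helper can look at the fence itself rather than at dma_fence_ops, and the dma_fence_driver_name()/dma_fence_timeline_name() accessors that replace direct fence->ops->get_*_name() calls. A sketch of a debug helper built on the same calls, mirroring what dma_fence_describe() and sw_sync do after this series; the mydrv_* name is hypothetical:

```c
#include <linux/dma-fence.h>
#include <linux/printk.h>

static void mydrv_describe_fence(struct dma_fence *fence, u64 timeline_value)
{
	/* A fence is still pending if its seqno is later than the timeline. */
	bool later = __dma_fence_is_later(fence, fence->seqno, timeline_value);

	pr_debug("%s:%s seq %llu is %ssignalled at timeline value %llu\n",
		 dma_fence_driver_name(fence),
		 dma_fence_timeline_name(fence),
		 fence->seqno,
		 later ? "un" : "",
		 timeline_value);
}
```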
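Finally, the udmabuf hunk drops vmap_pfn() (and with it the VMAP_PFN Kconfig select) in favour of building a struct page array from the folios and mapping it with vm_map_ram(). A standalone sketch of that pattern, with hypothetical names for everything except the mm helpers:

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Map one page out of each folio into a contiguous kernel virtual range. */
static void *my_map_folios(struct folio **folios, pgoff_t *offsets,
			   pgoff_t count)
{
	struct page **pages;
	void *vaddr;
	pgoff_t pg;

	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* offsets[] are byte offsets into each folio, as in udmabuf. */
	for (pg = 0; pg < count; pg++)
		pages[pg] = folio_page(folios[pg], offsets[pg] >> PAGE_SHIFT);

	/* node == -1: no NUMA preference for the virtual mapping. */
	vaddr = vm_map_ram(pages, count, -1);
	kvfree(pages); /* the page array is only needed to set up the mapping */
	return vaddr;
}
```

Teardown pairs with vm_unmap_ram(vaddr, count), which is why the caller has to remember the page count.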