path: root/drivers/gpu/drm/xe/display/xe_panic.c
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */

#include <drm/drm_cache.h>
#include <drm/drm_panic.h>

#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_panic.h"
#include "xe_bo.h"
#include "xe_res_cursor.h"

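/* Per-framebuffer state used when drawing the panic screen, one page at a time. */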
struct intel_panic {
	struct xe_res_cursor res;
	struct iosys_map vmap;

	int page;
};

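/*
 * Flush and drop the CPU mapping of the current system memory page.
 * VRAM is written through the BAR iomem mapping, which needs no unmap.
 */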
static void xe_panic_kunmap(struct intel_panic *panic)
{
	if (!panic->vmap.is_iomem && iosys_map_is_set(&panic->vmap)) {
		drm_clflush_virt_range(panic->vmap.vaddr, PAGE_SIZE);
		kunmap_local(panic->vmap.vaddr);
	}
	iosys_map_clear(&panic->vmap);
	panic->page = -1;
}

/*
 * The scanout buffer pages are not kept mapped, so map each page on demand:
 * system memory pages via ttm_bo_kmap_try_from_panic(), VRAM directly through
 * the BAR iomem mapping. Keep the mapping from the previous pixel to avoid
 * remapping on every write.
 */
static void xe_panic_page_set_pixel(struct drm_scanout_buffer *sb, unsigned int x,
				    unsigned int y, u32 color)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct intel_panic *panic = fb->panic;
	struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));
	unsigned int new_page;
	unsigned int offset;

	if (fb->panic_tiling)
		offset = fb->panic_tiling(sb->width, x, y);
	else
		offset = y * sb->pitch[0] + x * sb->format->cpp[0];

	new_page = offset >> PAGE_SHIFT;
	offset = offset % PAGE_SIZE;
	if (new_page != panic->page) {
		if (xe_bo_is_vram(bo)) {
			/* Display is always mapped on root tile */
			struct xe_vram_region *vram = xe_bo_device(bo)->mem.vram;

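			/*
			 * The resource cursor only walks forward; restart it at
			 * new_page when there is no cursor yet or when moving
			 * backwards, otherwise just advance it.
			 */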
			if (panic->page < 0 || new_page < panic->page) {
				xe_res_first(bo->ttm.resource, new_page * PAGE_SIZE,
					     bo->ttm.base.size - new_page * PAGE_SIZE, &panic->res);
			} else {
				xe_res_next(&panic->res, PAGE_SIZE * (new_page - panic->page));
			}
			iosys_map_set_vaddr_iomem(&panic->vmap,
						  vram->mapping + panic->res.start);
		} else {
			xe_panic_kunmap(panic);
			iosys_map_set_vaddr(&panic->vmap,
					    ttm_bo_kmap_try_from_panic(&bo->ttm,
								       new_page));
		}
		panic->page = new_page;
	}

	if (iosys_map_is_set(&panic->vmap))
		iosys_map_wr(&panic->vmap, offset, u32, color);
}

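/* Allocate the panic state for a framebuffer; returns NULL on failure. */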
struct intel_panic *intel_panic_alloc(void)
{
	struct intel_panic *panic;

	panic = kzalloc(sizeof(*panic), GFP_KERNEL);

	return panic;
}

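/*
 * Framebuffers placed in VRAM outside the CPU-visible window cannot be
 * written by the CPU, so reject them; otherwise install the per-pixel writer.
 */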
int intel_panic_setup(struct intel_panic *panic, struct drm_scanout_buffer *sb)
{
	struct intel_framebuffer *fb = (struct intel_framebuffer *)sb->private;
	struct xe_bo *bo = gem_to_xe_bo(intel_fb_bo(&fb->base));

	if (xe_bo_is_vram(bo) && !xe_bo_is_visible_vram(bo))
		return -ENODEV;

	panic->page = -1;
	sb->set_pixel = xe_panic_page_set_pixel;
	return 0;
}

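/* Drop whatever mapping is left over from the last pixel write. */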
void intel_panic_finish(struct intel_panic *panic)
{
	xe_panic_kunmap(panic);
}