author    Tvrtko Ursulin <tvrtko.ursulin@igalia.com>    2025-10-20 12:54:07 +0100
committer Tvrtko Ursulin <tursulin@ursulin.net>         2025-10-31 09:01:08 +0000
commit    0af5b6a8f8dd41fd801bb0f2af3295d69ba8b7fe (patch)
tree      916000b46e1064b84a3c6d062e230126a93d353b /drivers/gpu/drm/ttm/tests/ttm_pool_test.c
parent    d53adc244fbf965d7efeefb278ff8f2664bbe20e (diff)
drm/ttm: Replace multiple booleans with flags in pool init
Multiple consecutive boolean function arguments are usually not very readable. Replace the ones in ttm_pool_init() with a flags argument, with the additional benefit of soon being able to pass in more data at the cost of only this one round of code churn.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@igalia.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: Christian König <christian.koenig@amd.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Tvrtko Ursulin <tursulin@ursulin.net>
Link: https://lore.kernel.org/r/20251020115411.36818-3-tvrtko.ursulin@igalia.com
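For context, a minimal sketch of the ttm_pool_init() interface change the updated test code assumes. Only TTM_ALLOCATION_POOL_USE_DMA_ALLOC and the single flags argument are visible in this test-only diff; the prototypes, the DMA32 flag name and the BIT() values below are illustrative assumptions, the real definitions live in the ttm_pool headers touched elsewhere in the series.

/* Before: two adjacent booleans, easy to transpose at call sites. */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev, int nid,
		   bool use_dma_alloc, bool use_dma32);

/* After: one flags word; callers OR together TTM_ALLOCATION_POOL_* bits.    */
/* The DMA32 flag name and the BIT() assignments are assumed for this sketch. */
#define TTM_ALLOCATION_POOL_USE_DMA_ALLOC	BIT(0)
#define TTM_ALLOCATION_POOL_USE_DMA32		BIT(1)

void ttm_pool_init(struct ttm_pool *pool, struct device *dev, int nid,
		   unsigned int alloc_flags);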
Diffstat (limited to 'drivers/gpu/drm/ttm/tests/ttm_pool_test.c')
-rw-r--r--    drivers/gpu/drm/ttm/tests/ttm_pool_test.c    24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
index 17ebb9fbd688..11c92bd75779 100644
--- a/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
+++ b/drivers/gpu/drm/ttm/tests/ttm_pool_test.c
@@ -13,7 +13,7 @@
struct ttm_pool_test_case {
const char *description;
unsigned int order;
- bool use_dma_alloc;
+ unsigned int alloc_flags;
};
struct ttm_pool_test_priv {
@@ -87,7 +87,7 @@ static struct ttm_pool *ttm_pool_pre_populated(struct kunit *test,
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
- ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -114,12 +114,12 @@ static const struct ttm_pool_test_case ttm_pool_basic_cases[] = {
{
.description = "One page, with coherent DMA mappings enabled",
.order = 0,
- .use_dma_alloc = true,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
},
{
.description = "Above the allocation limit, with coherent DMA mappings enabled",
.order = MAX_PAGE_ORDER + 1,
- .use_dma_alloc = true,
+ .alloc_flags = TTM_ALLOCATION_POOL_USE_DMA_ALLOC,
},
};
@@ -151,13 +151,11 @@ static void ttm_pool_alloc_basic(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
- ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->use_dma_alloc,
- false);
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, params->alloc_flags);
KUNIT_ASSERT_PTR_EQ(test, pool->dev, devs->dev);
KUNIT_ASSERT_EQ(test, pool->nid, NUMA_NO_NODE);
- KUNIT_ASSERT_EQ(test, ttm_pool_uses_dma_alloc(pool),
- params->use_dma_alloc);
+ KUNIT_ASSERT_EQ(test, pool->alloc_flags, params->alloc_flags);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -167,14 +165,14 @@ static void ttm_pool_alloc_basic(struct kunit *test)
last_page = tt->pages[tt->num_pages - 1];
if (params->order <= MAX_PAGE_ORDER) {
- if (params->use_dma_alloc) {
+ if (ttm_pool_uses_dma_alloc(pool)) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NOT_NULL(test, (void *)last_page->private);
} else {
KUNIT_ASSERT_EQ(test, fst_page->private, params->order);
}
} else {
- if (params->use_dma_alloc) {
+ if (ttm_pool_uses_dma_alloc(pool)) {
KUNIT_ASSERT_NOT_NULL(test, (void *)fst_page->private);
KUNIT_ASSERT_NULL(test, (void *)last_page->private);
} else {
@@ -220,7 +218,7 @@ static void ttm_pool_alloc_basic_dma_addr(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
- ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
err = ttm_pool_alloc(pool, tt, &simple_ctx);
KUNIT_ASSERT_EQ(test, err, 0);
@@ -350,7 +348,7 @@ static void ttm_pool_free_dma_alloc(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
- ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, true, false);
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, TTM_ALLOCATION_POOL_USE_DMA_ALLOC);
ttm_pool_alloc(pool, tt, &simple_ctx);
pt = &pool->caching[caching].orders[order];
@@ -381,7 +379,7 @@ static void ttm_pool_free_no_dma_alloc(struct kunit *test)
pool = kunit_kzalloc(test, sizeof(*pool), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, pool);
- ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, false, false);
+ ttm_pool_init(pool, devs->dev, NUMA_NO_NODE, 0);
ttm_pool_alloc(pool, tt, &simple_ctx);
pt = &pool->caching[caching].orders[order];
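
The behavioural asserts above now key off ttm_pool_uses_dma_alloc(pool) rather than the test parameter, and the init-time assert compares pool->alloc_flags directly. A plausible shape for that helper on top of the new flags field, shown here only as a sketch since the header change is outside this file's diff:

/* Hypothetical sketch: the query helper reduced to a flag test. */
static inline bool ttm_pool_uses_dma_alloc(struct ttm_pool *pool)
{
	return pool->alloc_flags & TTM_ALLOCATION_POOL_USE_DMA_ALLOC;
}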