| author | Vipin Sharma <vipinsh@google.com> | 2022-11-03 12:17:16 -0700 |
|---|---|---|
| committer | Sean Christopherson <seanjc@google.com> | 2022-11-16 10:03:24 -0800 |
| commit | 69a62e2004b8bc3f9572f88a592b168345a6bbf9 | |
| tree | 50825817df7fbbd8c35e6a3842f24e574cc30318 /tools/testing/selftests/kvm/max_guest_memory_test.c | |
| parent | 018ea2d71a43372cb984021f03514dc6dd3d46df | |
KVM: selftests: Use SZ_* macros from sizes.h in max_guest_memory_test.c
Replace size_1gb, defined locally in max_guest_memory_test.c, with SZ_1G,
SZ_2G, and SZ_4G from the linux/sizes.h header file.
Signed-off-by: Vipin Sharma <vipinsh@google.com>
Reviewed-by: Sean Christopherson <seanjc@google.com>
Link: https://lore.kernel.org/r/20221103191719.1559407-5-vipinsh@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
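For reference, the SZ_* constants the patch switches to are plain hex definitions in linux/sizes.h (for the selftests build this should resolve to the copy under tools/include, to my understanding). A rough sketch of the relevant lines, not a verbatim quote of the header:

```c
/* Rough sketch of the linux/sizes.h definitions used by this patch (not verbatim). */
#define SZ_1G		0x40000000
#define SZ_2G		0x80000000
#define SZ_4G		_AC(0x100000000, ULL)	/* _AC() appends the ULL suffix in C code */
```

SZ_4G carries the ULL suffix because 0x100000000 does not fit in 32 bits, which is why the test can assign it directly to the uint64_t start_gpa.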
Diffstat (limited to 'tools/testing/selftests/kvm/max_guest_memory_test.c')
| -rw-r--r-- | tools/testing/selftests/kvm/max_guest_memory_test.c | 16 |
1 file changed, 8 insertions, 8 deletions
diff --git a/tools/testing/selftests/kvm/max_guest_memory_test.c b/tools/testing/selftests/kvm/max_guest_memory_test.c
index 1595b73dc09a..8056dc5831b5 100644
--- a/tools/testing/selftests/kvm/max_guest_memory_test.c
+++ b/tools/testing/selftests/kvm/max_guest_memory_test.c
@@ -11,6 +11,7 @@
 #include <linux/bitmap.h>
 #include <linux/bitops.h>
 #include <linux/atomic.h>
+#include <linux/sizes.h>
 
 #include "kvm_util.h"
 #include "test_util.h"
@@ -162,8 +163,7 @@ int main(int argc, char *argv[])
          * just below the 4gb boundary.  This test could create memory at
          * 1gb-3gb, but it's simpler to skip straight to 4gb.
          */
-        const uint64_t size_1gb = (1 << 30);
-        const uint64_t start_gpa = (4ull * size_1gb);
+        const uint64_t start_gpa = SZ_4G;
         const int first_slot = 1;
 
         struct timespec time_start, time_run1, time_reset, time_run2;
@@ -180,13 +180,13 @@ int main(int argc, char *argv[])
          * are quite common for x86, requires changing only max_mem (KVM allows
          * 32k memslots, 32k * 2gb == ~64tb of guest memory).
          */
-        slot_size = 2 * size_1gb;
+        slot_size = SZ_2G;
 
         max_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
         TEST_ASSERT(max_slots > first_slot, "KVM is broken");
 
         /* All KVM MMUs should be able to survive a 128gb guest. */
-        max_mem = 128 * size_1gb;
+        max_mem = 128ull * SZ_1G;
 
         calc_default_nr_vcpus();
 
@@ -197,11 +197,11 @@ int main(int argc, char *argv[])
                         TEST_ASSERT(nr_vcpus > 0, "number of vcpus must be >0");
                         break;
                 case 'm':
-                        max_mem = atoi_paranoid(optarg) * size_1gb;
+                        max_mem = 1ull * atoi_paranoid(optarg) * SZ_1G;
                         TEST_ASSERT(max_mem > 0, "memory size must be >0");
                         break;
                 case 's':
-                        slot_size = atoi_paranoid(optarg) * size_1gb;
+                        slot_size = 1ull * atoi_paranoid(optarg) * SZ_1G;
                         TEST_ASSERT(slot_size > 0, "slot size must be >0");
                         break;
                 case 'H':
@@ -245,7 +245,7 @@ int main(int argc, char *argv[])
 
 #ifdef __x86_64__
                 /* Identity map memory in the guest using 1gb pages. */
-                for (i = 0; i < slot_size; i += size_1gb)
+                for (i = 0; i < slot_size; i += SZ_1G)
                         __virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
 #else
                 for (i = 0; i < slot_size; i += vm->page_size)
@@ -260,7 +260,7 @@ int main(int argc, char *argv[])
         vcpus = NULL;
 
         pr_info("Running with %lugb of guest memory and %u vCPUs\n",
-                (gpa - start_gpa) / size_1gb, nr_vcpus);
+                (gpa - start_gpa) / SZ_1G, nr_vcpus);
 
         rendezvous_with_vcpus(&time_start, "spawning");
         rendezvous_with_vcpus(&time_run1, "run 1");
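A side note on the 1ull/128ull multipliers (my reading of the patch, not something stated in the commit message): the removed size_1gb was a uint64_t, so expressions like atoi_paranoid(optarg) * size_1gb were already computed in 64 bits, whereas SZ_1G and SZ_2G are 32-bit constants. The minimal stand-alone sketch below shows why the promotion matters; the names are illustrative and not taken from the test:

```c
#include <stdint.h>
#include <stdio.h>

#define SZ_1G 0x40000000	/* 32-bit constant, as in linux/sizes.h */

int main(void)
{
	/* Stands in for atoi_paranoid(optarg); unsigned here only so the
	 * overflow below is well defined for the demo. */
	unsigned int gb = 8;

	/* 32-bit multiply: 8 * 1GiB wraps to 0 before being widened to 64 bits. */
	uint64_t wrong = gb * SZ_1G;

	/* A leading 1ull promotes the whole expression to 64 bits, as the patch does. */
	uint64_t right = 1ull * gb * SZ_1G;

	printf("wrong = 0x%llx, right = 0x%llx\n",
	       (unsigned long long)wrong, (unsigned long long)right);
	return 0;
}
```

The same reasoning applies to the 128gb default: 128 * SZ_1G would overflow a 32-bit multiply, hence 128ull * SZ_1G in the patch.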