diff --git a/arch/x86/include/asm/kvmclock.h b/arch/x86/include/asm/kvmclock.h
index eceea929909740613471b74567d8e24413b57339..6c576519210280483b93f67d2101aa4f1d26445d 100644
--- a/arch/x86/include/asm/kvmclock.h
+++ b/arch/x86/include/asm/kvmclock.h
@@ -2,6 +2,20 @@
 #ifndef _ASM_X86_KVM_CLOCK_H
 #define _ASM_X86_KVM_CLOCK_H
 
+#include <linux/percpu.h>
+
 extern struct clocksource kvm_clock;
 
+DECLARE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+
+static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
+{
+	return &this_cpu_read(hv_clock_per_cpu)->pvti;
+}
+
+static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
+{
+	return this_cpu_read(hv_clock_per_cpu);
+}
+
 #endif /* _ASM_X86_KVM_CLOCK_H */
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index ad273e5861c1b267cb815fdd54969c2b26ffc90d..73c74b961d0fd9b69b6dd0f9fab50b312b5fccd6 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -49,18 +49,9 @@ early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
 static struct pvclock_vsyscall_time_info
 			hv_clock_boot[HVC_BOOT_ARRAY_SIZE] __bss_decrypted __aligned(PAGE_SIZE);
 static struct pvclock_wall_clock wall_clock __bss_decrypted;
-static DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
 static struct pvclock_vsyscall_time_info *hvclock_mem;
-
-static inline struct pvclock_vcpu_time_info *this_cpu_pvti(void)
-{
-	return &this_cpu_read(hv_clock_per_cpu)->pvti;
-}
-
-static inline struct pvclock_vsyscall_time_info *this_cpu_hvclock(void)
-{
-	return this_cpu_read(hv_clock_per_cpu);
-}
+DEFINE_PER_CPU(struct pvclock_vsyscall_time_info *, hv_clock_per_cpu);
+EXPORT_PER_CPU_SYMBOL_GPL(hv_clock_per_cpu);
 
 /*
  * The wallclock is the time of day when we booted. Since then, some time may
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index fe03bd978761eb34a1b5230303236c5e746a807d..751aa85a300129e9489e8c6ed61a1e4ff35117e0 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -65,8 +65,8 @@ static inline struct kvm_cpuid_entry2 *cpuid_entry2_find(
 	for (i = 0; i < nent; i++) {
 		e = &entries[i];
 
-		if (e->function == function && (e->index == index ||
-		    !(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX)))
+		if (e->function == function &&
+		    (!(e->flags & KVM_CPUID_FLAG_SIGNIFCANT_INDEX) || e->index == index))
 			return e;
 	}
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 9ecfcf13a046d1a3e213e0da82be7975bb17e345..116b08904ac34655e13ecced704763e03e6fc14b 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6848,7 +6848,7 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
 		 */
 		tsx_ctrl = vmx_find_uret_msr(vmx, MSR_IA32_TSX_CTRL);
 		if (tsx_ctrl)
-			vmx->guest_uret_msrs[i].mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
+			tsx_ctrl->mask = ~(u64)TSX_CTRL_CPUID_CLEAR;
 	}
 
 	err = alloc_loaded_vmcs(&vmx->vmcs01);
diff --git a/drivers/ptp/ptp_kvm_x86.c b/drivers/ptp/ptp_kvm_x86.c
index 3dd519dfc473c7719da1e7f35f532ac548012473..d0096cd7096a8192b92ae356279f6de37edf93b0 100644
--- a/drivers/ptp/ptp_kvm_x86.c
+++ b/drivers/ptp/ptp_kvm_x86.c
@@ -15,8 +15,6 @@
 #include <linux/ptp_clock_kernel.h>
 #include <linux/ptp_kvm.h>
 
-struct pvclock_vsyscall_time_info *hv_clock;
-
 static phys_addr_t clock_pair_gpa;
 static struct kvm_clock_pairing clock_pair;
 
@@ -28,8 +26,7 @@ int kvm_arch_ptp_init(void)
 		return -ENODEV;
 
 	clock_pair_gpa = slow_virt_to_phys(&clock_pair);
-	hv_clock = pvclock_get_pvti_cpu0_va();
-	if (!hv_clock)
+	if (!pvclock_get_pvti_cpu0_va())
 		return -ENODEV;
 
 	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, clock_pair_gpa,
@@ -64,10 +61,8 @@ int kvm_arch_ptp_get_crosststamp(u64 *cycle, struct timespec64 *tspec,
 	struct pvclock_vcpu_time_info *src;
 	unsigned int version;
 	long ret;
-	int cpu;
 
-	cpu = smp_processor_id();
-	src = &hv_clock[cpu].pvti;
+	src = this_cpu_pvti();
 
 	do {
 		/*
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index eba8bd08293e7f1a57e4cb9b73e02ce6134347c4..05e65ca1c30cda841a66b9ee7886b99656c070bf 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -315,7 +315,7 @@ static inline void set_xmm(int n, unsigned long val)
 #define GET_XMM(__xmm)						\
 ({								\
 	unsigned long __val;					\
-	asm volatile("movq %%"#__xmm", %0" : "=r"(__val) : : #__xmm); \
+	asm volatile("movq %%"#__xmm", %0" : "=r"(__val));	\
 	__val;							\
 })
 
diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c
index c5e0dd664a7b35e4387241012e16f13e960c8f5c..4158da0da2bba8e5e8ea12acd87f6cff6206e3ae 100644
--- a/tools/testing/selftests/kvm/rseq_test.c
+++ b/tools/testing/selftests/kvm/rseq_test.c
@@ -10,6 +10,7 @@
 #include <signal.h>
 #include <syscall.h>
 #include <sys/ioctl.h>
+#include <sys/sysinfo.h>
 #include <asm/barrier.h>
 #include <linux/atomic.h>
 #include <linux/rseq.h>
@@ -39,6 +40,7 @@ static __thread volatile struct rseq __rseq = {
 
 static pthread_t migration_thread;
 static cpu_set_t possible_mask;
+static int min_cpu, max_cpu;
 static bool done;
 
 static atomic_t seq_cnt;
@@ -57,20 +59,37 @@ static void sys_rseq(int flags)
 	TEST_ASSERT(!r, "rseq failed, errno = %d (%s)", errno, strerror(errno));
 }
 
+static int next_cpu(int cpu)
+{
+	/*
+	 * Advance to the next CPU, skipping those that weren't in the original
+	 * affinity set. Sadly, there is no CPU_SET_FOR_EACH, and cpu_set_t's
+	 * data storage is considered opaque. Note, if this task is pinned
+	 * to a small set of discontiguous CPUs, e.g. 2 and 1023, this loop will
+	 * burn a lot of cycles and the test will take longer than normal to
+	 * complete.
+	 */
+	do {
+		cpu++;
+		if (cpu > max_cpu) {
+			cpu = min_cpu;
+			TEST_ASSERT(CPU_ISSET(cpu, &possible_mask),
+				    "Min CPU = %d must always be usable", cpu);
+			break;
+		}
+	} while (!CPU_ISSET(cpu, &possible_mask));
+
+	return cpu;
+}
+
 static void *migration_worker(void *ign)
 {
 	cpu_set_t allowed_mask;
-	int r, i, nr_cpus, cpu;
+	int r, i, cpu;
 
 	CPU_ZERO(&allowed_mask);
 
-	nr_cpus = CPU_COUNT(&possible_mask);
-
-	for (i = 0; i < NR_TASK_MIGRATIONS; i++) {
-		cpu = i % nr_cpus;
-		if (!CPU_ISSET(cpu, &possible_mask))
-			continue;
-
+	for (i = 0, cpu = min_cpu; i < NR_TASK_MIGRATIONS; i++, cpu = next_cpu(cpu)) {
 		CPU_SET(cpu, &allowed_mask);
 
 		/*
@@ -154,6 +173,36 @@ static void *migration_worker(void *ign)
 	return NULL;
 }
 
+static int calc_min_max_cpu(void)
+{
+	int i, cnt, nproc;
+
+	if (CPU_COUNT(&possible_mask) < 2)
+		return -EINVAL;
+
+	/*
+	 * CPU_SET doesn't provide a FOR_EACH helper, get the min/max CPU that
+	 * this task is affined to in order to reduce the time spent querying
+	 * unusable CPUs, e.g. if this task is pinned to a small percentage of
+	 * total CPUs.
+	 */
+	nproc = get_nprocs_conf();
+	min_cpu = -1;
+	max_cpu = -1;
+	cnt = 0;
+
+	for (i = 0; i < nproc; i++) {
+		if (!CPU_ISSET(i, &possible_mask))
+			continue;
+		if (min_cpu == -1)
+			min_cpu = i;
+		max_cpu = i;
+		cnt++;
+	}
+
+	return (cnt < 2) ? -EINVAL : 0;
+}
+
 int main(int argc, char *argv[])
 {
 	int r, i, snapshot;
@@ -167,8 +216,8 @@ int main(int argc, char *argv[])
 	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)", errno,
 		    strerror(errno));
 
-	if (CPU_COUNT(&possible_mask) < 2) {
-		print_skip("Only one CPU, task migration not possible\n");
+	if (calc_min_max_cpu()) {
+		print_skip("Only one usable CPU, task migration not possible");
 		exit(KSFT_SKIP);
 	}
 