Merge tag 'kvmarm-fixes-5.4-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD

KVM/arm fixes for 5.4, take #2

Special PMU edition:

- Fix cycle counter truncation
- Fix cycle counter overflow limit on pure 64bit system
- Allow chained events to be actually functional
- Correct sample period after overflow
bonzini committed Oct 22, 2019 · 2 parents 49dedf0 + 8c3252c · commit 9800c24
Showing 2 changed files with 39 additions and 13 deletions.
arch/arm64/kvm/sys_regs.c: 4 additions & 0 deletions
@@ -632,6 +632,8 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 	 */
 	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+	if (!system_supports_32bit_el0())
+		val |= ARMV8_PMU_PMCR_LC;
 	__vcpu_sys_reg(vcpu, r->reg) = val;
 }

@@ -682,6 +684,8 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
 		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
 		val &= ~ARMV8_PMU_PMCR_MASK;
 		val |= p->regval & ARMV8_PMU_PMCR_MASK;
+		if (!system_supports_32bit_el0())
+			val |= ARMV8_PMU_PMCR_LC;
 		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
 		kvm_pmu_handle_pmcr(vcpu, val);
 		kvm_vcpu_pmu_restore_guest(vcpu);
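Both sys_regs.c hunks apply the same rule to the emulated PMCR_EL0: on a system without 32bit EL0 support, the LC bit (long cycle counter enable) is RES1, so the guest must always see it as set, and the cycle counter then overflows as a full 64bit counter. A standalone sketch of the resulting overflow limit (not kernel code; ARMV8_PMU_PMCR_LC is the architectural bit 6):

#include <stdbool.h>
#include <stdint.h>

#define ARMV8_PMU_PMCR_LC	(1U << 6)	/* long cycle counter enable */

/* Value at which the emulated cycle counter wraps, given a PMCR value. */
static uint64_t cycle_counter_limit(uint32_t pmcr, bool has_32bit_el0)
{
	/* On pure 64bit systems LC is RES1, as the hunks above enforce. */
	if (!has_32bit_el0)
		pmcr |= ARMV8_PMU_PMCR_LC;

	return (pmcr & ARMV8_PMU_PMCR_LC) ? UINT64_MAX : UINT32_MAX;
}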
virt/kvm/arm/pmu.c: 35 additions & 13 deletions
@@ -8,6 +8,7 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
+#include <linux/perf/arm_pmu.h>
 #include <linux/uaccess.h>
 #include <asm/kvm_emulate.h>
 #include <kvm/arm_pmu.h>
@@ -146,8 +147,7 @@ u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
 	if (kvm_pmu_pmc_is_chained(pmc) &&
 	    kvm_pmu_idx_is_high_counter(select_idx))
 		counter = upper_32_bits(counter);
-
-	else if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
+	else if (select_idx != ARMV8_PMU_CYCLE_IDX)
 		counter = lower_32_bits(counter);
 
 	return counter;
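This hunk is the cycle counter truncation fix: PMCCNTR_EL0 is architecturally 64bit, so only event counters (and the high word of a chained pair) may be narrowed to 32 bits. A standalone illustration of the corruption the old condition could apply to the cycle counter, with lower_32_bits() redefined locally:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the kernel's lower_32_bits() macro. */
static uint64_t lower_32_bits(uint64_t n)
{
	return n & 0xffffffffULL;
}

int main(void)
{
	uint64_t cycles = 0x100000005ULL;	/* just over 2^32 cycles */

	/* Truncated as if it were a 32bit event counter: reads back as 5. */
	printf("0x%" PRIx64 "\n", lower_32_bits(cycles));
	return 0;
}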
@@ -193,24 +193,27 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
  */
 static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
 {
-	u64 counter, reg;
+	u64 counter, reg, val;
 
 	pmc = kvm_pmu_get_canonical_pmc(pmc);
 	if (!pmc->perf_event)
 		return;
 
 	counter = kvm_pmu_get_pair_counter_value(vcpu, pmc);
 
-	if (kvm_pmu_pmc_is_chained(pmc)) {
-		reg = PMEVCNTR0_EL0 + pmc->idx;
-		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
-		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
+		reg = PMCCNTR_EL0;
+		val = counter;
 	} else {
-		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
-		      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
-		__vcpu_sys_reg(vcpu, reg) = lower_32_bits(counter);
+		reg = PMEVCNTR0_EL0 + pmc->idx;
+		val = lower_32_bits(counter);
 	}
 
+	__vcpu_sys_reg(vcpu, reg) = val;
+
+	if (kvm_pmu_pmc_is_chained(pmc))
+		__vcpu_sys_reg(vcpu, reg + 1) = upper_32_bits(counter);
+
 	kvm_pmu_release_perf_event(pmc);
 }

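After the reorganisation above, the write-back in kvm_pmu_stop_counter() is uniform: the canonical register (PMCCNTR_EL0 or PMEVCNTRn_EL0) receives the possibly truncated value, and only a chained pair additionally spills the high word into the following register. A minimal sketch of that split, using illustrative pointers instead of the kernel's __vcpu_sys_reg() accessor:

#include <stdint.h>

/* lo/hi stand in for two consecutive 32bit shadow event counters. */
static void save_chained_pair(uint32_t *lo, uint32_t *hi, uint64_t counter)
{
	*lo = (uint32_t)counter;		/* lower_32_bits(counter) */
	*hi = (uint32_t)(counter >> 32);	/* upper_32_bits(counter) */
}

The cycle counter path skips the split entirely and stores all 64 bits, which makes this hunk the second half of the truncation fix.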
@@ -440,15 +443,34 @@ static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
 				  struct pt_regs *regs)
 {
 	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
 	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
 	int idx = pmc->idx;
+	u64 period;
+
+	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);
+
+	/*
+	 * Reset the sample period to the architectural limit,
+	 * i.e. the point where the counter overflows.
+	 */
+	period = -(local64_read(&perf_event->count));
+
+	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
+		period &= GENMASK(31, 0);
+
+	local64_set(&perf_event->hw.period_left, 0);
+	perf_event->attr.sample_period = period;
+	perf_event->hw.sample_period = period;
 
 	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);
 
 	if (kvm_pmu_overflow_status(vcpu)) {
 		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
 		kvm_vcpu_kick(vcpu);
 	}
+
+	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
 }
 
 /**
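The overflow handler now stops the event, recomputes the sample period from the live count, and restarts it, so each host-side overflow is rearmed to fire exactly at the guest counter's next architectural wrap. A standalone worked example of that arithmetic for a 32bit counter:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* The perf event fired a few counts past the 32bit wrap point. */
	uint64_t count = 0x100000003ULL;

	/* As in the hunk above: period = -(count), truncated to 32 bits. */
	uint64_t period = (0 - count) & 0xffffffffULL;

	/* 0xfffffffd further counts land exactly on the next wrap. */
	printf("next period = 0x%" PRIx64 "\n", period);
	return 0;
}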
@@ -567,12 +589,12 @@ static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
 		 * high counter.
 		 */
 		attr.sample_period = (-counter) & GENMASK(63, 0);
+		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
+			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
+
 		event = perf_event_create_kernel_counter(&attr, -1, current,
 							 kvm_pmu_perf_overflow,
 							 pmc + 1);
-
-		if (kvm_pmu_counter_is_enabled(vcpu, pmc->idx + 1))
-			attr.config1 |= PERF_ATTR_CFG1_KVM_PMU_CHAINED;
 	} else {
 		/* The initial sample period (overflow count) of an event. */
 		if (kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
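The final hunk moves the CHAINED flag ahead of perf_event_create_kernel_counter(), because perf snapshots the attr structure when the event is created; any flag set afterwards is silently ignored, which is why chained events never worked. A standalone sketch of that bug class, with fake_attr and create_event() as hypothetical stand-ins:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct fake_attr { uint64_t config1; };
struct fake_event { struct fake_attr attr; };

/* Stand-in for event creation: takes a by-value snapshot of attr. */
static struct fake_event create_event(const struct fake_attr *attr)
{
	struct fake_event ev = { .attr = *attr };
	return ev;
}

int main(void)
{
	struct fake_attr attr = { 0 };
	struct fake_event ev = create_event(&attr);

	attr.config1 |= 1;	/* too late: the event kept its own copy */
	printf("%" PRIu64 "\n", ev.attr.config1);	/* prints 0 */
	return 0;
}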
