 arch/x86/kernel/cpu/perf_counter.c | 24 ++++++++++++++----------
 1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index d6d6529349d..75a090394b6 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -334,11 +334,13 @@ static u64 pmc_amd_save_disable_all(void)
 	for (idx = 0; idx < nr_counters_generic; idx++) {
 		u64 val;
 
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
-			val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+			continue;
+		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 
 	return enabled;
@@ -372,13 +374,15 @@ static void pmc_amd_restore_all(u64 ctrl)
 		return;
 
 	for (idx = 0; idx < nr_counters_generic; idx++) {
-		if (test_bit(idx, cpuc->active_mask)) {
-			u64 val;
+		u64 val;
 
-			rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+			continue;
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 }
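
For readers who want to trace the restructured control flow in isolation, below is a minimal user-space sketch of the save/disable loop as it reads after this patch. The mock_evntsel[] array, mock_active_mask, NR_COUNTERS, and the rdmsrl()/wrmsrl() macros are stand-ins invented here for illustration, not kernel API; only the loop shape and the ARCH_PERFMON_EVENTSEL0_ENABLE bit position mirror the real code.

/*
 * Minimal sketch of the early-continue loop structure after the patch.
 * All "mock_" names and the simplified accessors are hypothetical
 * stand-ins for the kernel's MSR helpers and per-CPU counter state.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_COUNTERS			4
#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1ULL << 22)	/* enable bit, as in the kernel */

static uint64_t mock_evntsel[NR_COUNTERS];	/* stands in for MSR_K7_EVNTSEL0..3 */
static unsigned long mock_active_mask = 0x5;	/* counters 0 and 2 in use */

/* mock MSR accessors, mirroring the kernel's call syntax */
#define rdmsrl(idx, val)	((val) = mock_evntsel[idx])
#define wrmsrl(idx, val)	(mock_evntsel[idx] = (val))

static int test_bit(int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1;
}

static void save_disable_all(void)
{
	int idx;

	for (idx = 0; idx < NR_COUNTERS; idx++) {
		uint64_t val;

		if (!test_bit(idx, &mock_active_mask))
			continue;	/* counter not in use: skip the MSR read */
		rdmsrl(idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;	/* already disabled: skip the MSR write */
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(idx, val);
	}
}

int main(void)
{
	int idx;

	mock_evntsel[0] = ARCH_PERFMON_EVENTSEL0_ENABLE;
	mock_evntsel[2] = ARCH_PERFMON_EVENTSEL0_ENABLE;

	save_disable_all();

	for (idx = 0; idx < NR_COUNTERS; idx++)
		printf("counter %d: evntsel=%#llx\n", idx,
		       (unsigned long long)mock_evntsel[idx]);
	return 0;
}

After the patch, both pmc_amd_save_disable_all() and pmc_amd_restore_all() take the same shape: bail out early for counters not set in active_mask, then touch the event-select MSR only when the enable bit actually needs to change, avoiding redundant wrmsrl() calls.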
