@@ -5271,34 +5271,59 @@ static inline bool intel_pmu_broken_perf_cap(void)
 	return false;
 }
 
+#define counter_mask(_gp, _fixed)	((_gp) | ((u64)(_fixed) << INTEL_PMC_IDX_FIXED))
+
 static void update_pmu_cap(struct pmu *pmu)
 {
-	unsigned int cntr, fixed_cntr, ecx, edx;
-	union cpuid35_eax eax;
-	union cpuid35_ebx ebx;
+	unsigned int eax, ebx, ecx, edx;
+	union cpuid35_eax eax_0;
+	union cpuid35_ebx ebx_0;
+	u64 cntrs_mask = 0;
+	u64 pebs_mask = 0;
+	u64 pdists_mask = 0;
 
-	cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
+	cpuid(ARCH_PERFMON_EXT_LEAF, &eax_0.full, &ebx_0.full, &ecx, &edx);
 
-	if (ebx.split.umask2)
+	if (ebx_0.split.umask2)
 		hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_UMASK2;
-	if (ebx.split.eq)
+	if (ebx_0.split.eq)
 		hybrid(pmu, config_mask) |= ARCH_PERFMON_EVENTSEL_EQ;
 
-	if (eax.split.cntr_subleaf) {
+	if (eax_0.split.cntr_subleaf) {
 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
-			    &cntr, &fixed_cntr, &ecx, &edx);
-		hybrid(pmu, cntr_mask64) = cntr;
-		hybrid(pmu, fixed_cntr_mask64) = fixed_cntr;
+			    &eax, &ebx, &ecx, &edx);
+		hybrid(pmu, cntr_mask64) = eax;
+		hybrid(pmu, fixed_cntr_mask64) = ebx;
+		cntrs_mask = counter_mask(eax, ebx);
 	}
 
-	if (eax.split.acr_subleaf) {
+	if (eax_0.split.acr_subleaf) {
 		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_ACR_LEAF,
-			    &cntr, &fixed_cntr, &ecx, &edx);
+			    &eax, &ebx, &ecx, &edx);
 		/* The mask of the counters which can be reloaded */
-		hybrid(pmu, acr_cntr_mask64) = cntr | ((u64)fixed_cntr << INTEL_PMC_IDX_FIXED);
-
+		hybrid(pmu, acr_cntr_mask64) = counter_mask(eax, ebx);
 		/* The mask of the counters which can cause a reload of reloadable counters */
-		hybrid(pmu, acr_cause_mask64) = ecx | ((u64)edx << INTEL_PMC_IDX_FIXED);
+		hybrid(pmu, acr_cause_mask64) = counter_mask(ecx, edx);
+	}
+
+	/* Bits[5:4] should be set simultaneously if arch-PEBS is supported */
+	if (eax_0.split.pebs_caps_subleaf && eax_0.split.pebs_cnts_subleaf) {
+		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_CAP_LEAF,
+			    &eax, &ebx, &ecx, &edx);
+		hybrid(pmu, arch_pebs_cap).caps = (u64)ebx << 32;
+
+		cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_PEBS_COUNTER_LEAF,
+			    &eax, &ebx, &ecx, &edx);
+		pebs_mask = counter_mask(eax, ecx);
+		pdists_mask = counter_mask(ebx, edx);
+		hybrid(pmu, arch_pebs_cap).counters = pebs_mask;
+		hybrid(pmu, arch_pebs_cap).pdists = pdists_mask;
+
+		if (WARN_ON((pebs_mask | pdists_mask) & ~cntrs_mask))
+			x86_pmu.arch_pebs = 0;
+	} else {
+		WARN_ON(x86_pmu.arch_pebs == 1);
+		x86_pmu.arch_pebs = 0;
 	}
 
 	if (!intel_pmu_broken_perf_cap()) {
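
The new counter_mask() helper folds the two 32-bit CPUID bitmaps into the kernel's unified 64-bit counter index space: general-purpose counters occupy the low bits and fixed counters start at bit INTEL_PMC_IDX_FIXED (32). A minimal userspace sketch of that packing, using hypothetical CPUID values rather than any real enumeration:

/*
 * Standalone illustration of the counter_mask() packing. The gp/fixed
 * values below are made up for the example, not from real hardware.
 */
#include <stdint.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED	32
#define counter_mask(_gp, _fixed) \
	((_gp) | ((uint64_t)(_fixed) << INTEL_PMC_IDX_FIXED))

int main(void)
{
	uint32_t gp = 0xff;	/* e.g. eight GP counters reported in EAX */
	uint32_t fixed = 0x7;	/* e.g. three fixed counters reported in EBX */

	/* Prints 0x00000007000000ff: fixed counters land at bits 34:32 */
	printf("0x%016llx\n", (unsigned long long)counter_mask(gp, fixed));
	return 0;
}

Packing both bitmaps into a single u64 is what lets the PEBS and pdist masks from the 0x23 sub-leaves be sanity-checked against cntrs_mask with one AND, as the WARN_ON() in the hunk above does.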
@@ -6252,7 +6277,7 @@ tsx_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 static umode_t
 pebs_is_visible(struct kobject *kobj, struct attribute *attr, int i)
 {
-	return x86_pmu.ds_pebs ? attr->mode : 0;
+	return intel_pmu_has_pebs() ? attr->mode : 0;
 }
 
 static umode_t
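
This hunk decouples the sysfs PEBS attributes from the legacy DS mechanism: visibility now follows intel_pmu_has_pebs() instead of x86_pmu.ds_pebs alone. A sketch of the helper's assumed shape, inferred from the fields this patch touches (the actual definition lives elsewhere in the series):

/* Assumed shape: PEBS attrs stay visible for either DS-based or arch-PEBS. */
static inline bool intel_pmu_has_pebs(void)
{
	return x86_pmu.ds_pebs || x86_pmu.arch_pebs;
}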
@@ -7728,6 +7753,9 @@ __init int intel_pmu_init(void)
 	if (!is_hybrid() && boot_cpu_has(X86_FEATURE_ARCH_PERFMON_EXT))
 		update_pmu_cap(NULL);
 
+	if (x86_pmu.arch_pebs)
+		pr_cont("Architectural PEBS, ");
+
 	intel_pmu_check_counters_mask(&x86_pmu.cntr_mask64,
 				      &x86_pmu.fixed_cntr_mask64,
 				      &x86_pmu.intel_ctrl);