@@ -19,8 +19,6 @@
 #include "../perf_event.h"
 #include "iommu.h"
 
-#define COUNTER_SHIFT	16
-
 /* iommu pmu conf masks */
 #define GET_CSOURCE(x)     ((x)->conf & 0xFFULL)
 #define GET_DEVID(x)       (((x)->conf >> 8) & 0xFFFFULL)
@@ -286,22 +284,31 @@ static void perf_iommu_start(struct perf_event *event, int flags)
 	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
 	hwc->state = 0;
 
+	/*
+	 * To account for power-gating, which prevents write to
+	 * the counter, we need to enable the counter
+	 * before setting up counter register.
+	 */
+	perf_iommu_enable_event(event);
+
 	if (flags & PERF_EF_RELOAD) {
-		u64 prev_raw_count = local64_read(&hwc->prev_count);
+		u64 count = 0;
 		struct amd_iommu *iommu = perf_event_2_iommu(event);
 
+		/*
+		 * Since the IOMMU PMU only support counting mode,
+		 * the counter always start with value zero.
+		 */
 		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
-				     IOMMU_PC_COUNTER_REG, &prev_raw_count);
+				     IOMMU_PC_COUNTER_REG, &count);
 	}
 
-	perf_iommu_enable_event(event);
 	perf_event_update_userpage(event);
-
 }
 
 static void perf_iommu_read(struct perf_event *event)
 {
-	u64 count, prev, delta;
+	u64 count;
 	struct hw_perf_event *hwc = &event->hw;
 	struct amd_iommu *iommu = perf_event_2_iommu(event);
 
@@ -312,14 +319,11 @@ static void perf_iommu_read(struct perf_event *event)
 	/* IOMMU pc counter register is only 48 bits */
 	count &= GENMASK_ULL(47, 0);
 
-	prev = local64_read(&hwc->prev_count);
-	if (local64_cmpxchg(&hwc->prev_count, prev, count) != prev)
-		return;
-
-	/* Handle 48-bit counter overflow */
-	delta = (count << COUNTER_SHIFT) - (prev << COUNTER_SHIFT);
-	delta >>= COUNTER_SHIFT;
-	local64_add(delta, &event->count);
+	/*
+	 * Since the counter always start with value zero,
+	 * simply just accumulate the count for the event.
+	 */
+	local64_add(count, &event->count);
 }
 
 static void perf_iommu_stop(struct perf_event *event, int flags)
@@ -329,15 +333,16 @@ static void perf_iommu_stop(struct perf_event *event, int flags)
 	if (hwc->state & PERF_HES_UPTODATE)
 		return;
 
+	/*
+	 * To account for power-gating, in which reading the counter would
+	 * return zero, we need to read the register before disabling.
+	 */
+	perf_iommu_read(event);
+	hwc->state |= PERF_HES_UPTODATE;
+
 	perf_iommu_disable_event(event);
 	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
 	hwc->state |= PERF_HES_STOPPED;
-
-	if (hwc->state & PERF_HES_UPTODATE)
-		return;
-
-	perf_iommu_read(event);
-	hwc->state |= PERF_HES_UPTODATE;
 }
 
 static int perf_iommu_add(struct perf_event *event, int flags)
0 commit comments