11// SPDX-License-Identifier: GPL-2.0
2- /* Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
2+ /*
3+ * Marvell CN10K DRAM Subsystem (DSS) Performance Monitor Driver
34 *
4- * Copyright (C) 2021 Marvell.
5+ * Copyright (C) 2021-2024 Marvell.
56 */
67
78#include <linux/init.h>
1415#include <linux/platform_device.h>
1516
1617/* Performance Counters Operating Mode Control Registers */
17- #define DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
18- #define OP_MODE_CTRL_VAL_MANNUAL 0x1
18+ #define CN10K_DDRC_PERF_CNT_OP_MODE_CTRL 0x8020
19+ #define OP_MODE_CTRL_VAL_MANUAL 0x1
1920
2021/* Performance Counters Start Operation Control Registers */
21- #define DDRC_PERF_CNT_START_OP_CTRL 0x8028
22+ #define CN10K_DDRC_PERF_CNT_START_OP_CTRL 0x8028
2223#define START_OP_CTRL_VAL_START 0x1ULL
2324#define START_OP_CTRL_VAL_ACTIVE 0x2
2425
2526/* Performance Counters End Operation Control Registers */
26- #define DDRC_PERF_CNT_END_OP_CTRL 0x8030
27+ #define CN10K_DDRC_PERF_CNT_END_OP_CTRL 0x8030
2728#define END_OP_CTRL_VAL_END 0x1ULL
2829
2930/* Performance Counters End Status Registers */
30- #define DDRC_PERF_CNT_END_STATUS 0x8038
31+ #define CN10K_DDRC_PERF_CNT_END_STATUS 0x8038
3132#define END_STATUS_VAL_END_TIMER_MODE_END 0x1
3233
3334/* Performance Counters Configuration Registers */
34- #define DDRC_PERF_CFG_BASE 0x8040
35+ #define CN10K_DDRC_PERF_CFG_BASE 0x8040
3536
3637/* 8 Generic event counter + 2 fixed event counters */
3738#define DDRC_PERF_NUM_GEN_COUNTERS 8
4243 DDRC_PERF_NUM_FIX_COUNTERS)
4344
4445/* Generic event counter registers */
45- #define DDRC_PERF_CFG (n ) (DDRC_PERF_CFG_BASE + 8 * (n))
46+ #define DDRC_PERF_CFG (base , n ) ((base) + 8 * (n))
4647#define EVENT_ENABLE BIT_ULL(63)
4748
4849/* Two dedicated event counters for DDR reads and writes */
4950#define EVENT_DDR_READS 101
5051#define EVENT_DDR_WRITES 100
5152
53+ #define DDRC_PERF_REG (base , n ) ((base) + 8 * (n))
5254/*
5355 * programmable events IDs in programmable event counters.
5456 * DO NOT change these event-id numbers, they are used to
102104#define EVENT_HIF_RD_OR_WR 1
103105
104106/* Event counter value registers */
105- #define DDRC_PERF_CNT_VALUE_BASE 0x8080
106- #define DDRC_PERF_CNT_VALUE (n ) (DDRC_PERF_CNT_VALUE_BASE + 8 * (n))
107+ #define CN10K_DDRC_PERF_CNT_VALUE_BASE 0x8080
107108
108109/* Fixed event counter enable/disable register */
109- #define DDRC_PERF_CNT_FREERUN_EN 0x80C0
110+ #define CN10K_DDRC_PERF_CNT_FREERUN_EN 0x80C0
110111#define DDRC_PERF_FREERUN_WRITE_EN 0x1
111112#define DDRC_PERF_FREERUN_READ_EN 0x2
112113
113114/* Fixed event counter control register */
114- #define DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
115+ #define CN10K_DDRC_PERF_CNT_FREERUN_CTRL 0x80C8
115116#define DDRC_FREERUN_WRITE_CNT_CLR 0x1
116117#define DDRC_FREERUN_READ_CNT_CLR 0x2
117118
118- /* Fixed event counter value register */
119- #define DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
120- #define DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
121119#define DDRC_PERF_CNT_VALUE_OVERFLOW BIT_ULL(48)
122120#define DDRC_PERF_CNT_MAX_VALUE GENMASK_ULL(48, 0)
123121
122+ /* Fixed event counter value register */
123+ #define CN10K_DDRC_PERF_CNT_VALUE_WR_OP 0x80D0
124+ #define CN10K_DDRC_PERF_CNT_VALUE_RD_OP 0x80D8
125+
124126struct cn10k_ddr_pmu {
125127 struct pmu pmu ;
126128 void __iomem * base ;
129+ const struct ddr_pmu_platform_data * p_data ;
127130 unsigned int cpu ;
128131 struct device * dev ;
129132 int active_events ;
@@ -134,6 +137,23 @@ struct cn10k_ddr_pmu {
134137
135138#define to_cn10k_ddr_pmu (p ) container_of(p, struct cn10k_ddr_pmu, pmu)
136139
/**
 * struct ddr_pmu_platform_data - per-SoC DDR PMU register layout and limits
 *
 * All cnt_* members are byte offsets added to the PMU MMIO base
 * (pmu->base) before a readq/writeq access.
 *
 * @counter_overflow_val: value at which a counter is considered overflowed
 * @counter_max_val:      mask of valid counter bits; used to wrap deltas
 *                        in the event-update path
 * @cnt_base:             base offset of the generic event counter values
 * @cfg_base:             base offset of the generic counter config registers
 * @cnt_op_mode_ctrl:     operating-mode control register (manual/auto)
 * @cnt_start_op_ctrl:    start-operation control register
 * @cnt_end_op_ctrl:      end-operation control register
 * @cnt_end_status:       end-status register
 * @cnt_freerun_en:       fixed (free-running) counter enable register
 * @cnt_freerun_ctrl:     fixed counter control (clear) register
 * @cnt_freerun_clr:      fixed counter clear register; 0 when the SoC has
 *                        no separate clear register (e.g. CN10K)
 * @cnt_value_wr_op:      fixed DDR-writes counter value register
 * @cnt_value_rd_op:      fixed DDR-reads counter value register
 * @is_cn10k:             true when the platform data describes CN10K
 */
struct ddr_pmu_platform_data {
	u64 counter_overflow_val;
	u64 counter_max_val;
	u64 cnt_base;
	u64 cfg_base;
	u64 cnt_op_mode_ctrl;
	u64 cnt_start_op_ctrl;
	u64 cnt_end_op_ctrl;
	u64 cnt_end_status;
	u64 cnt_freerun_en;
	u64 cnt_freerun_ctrl;
	u64 cnt_freerun_clr;
	u64 cnt_value_wr_op;
	u64 cnt_value_rd_op;
	bool is_cn10k;
};
156+
137157static ssize_t cn10k_ddr_pmu_event_show (struct device * dev ,
138158 struct device_attribute * attr ,
139159 char * page )
@@ -354,6 +374,7 @@ static int cn10k_ddr_perf_event_init(struct perf_event *event)
354374static void cn10k_ddr_perf_counter_enable (struct cn10k_ddr_pmu * pmu ,
355375 int counter , bool enable )
356376{
377+ const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
357378 u32 reg ;
358379 u64 val ;
359380
@@ -363,7 +384,7 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
363384 }
364385
365386 if (counter < DDRC_PERF_NUM_GEN_COUNTERS ) {
366- reg = DDRC_PERF_CFG (counter );
387+ reg = DDRC_PERF_CFG (p_data -> cfg_base , counter );
367388 val = readq_relaxed (pmu -> base + reg );
368389
369390 if (enable )
@@ -373,7 +394,8 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
373394
374395 writeq_relaxed (val , pmu -> base + reg );
375396 } else {
376- val = readq_relaxed (pmu -> base + DDRC_PERF_CNT_FREERUN_EN );
397+ val = readq_relaxed (pmu -> base +
398+ p_data -> cnt_freerun_en );
377399 if (enable ) {
378400 if (counter == DDRC_PERF_READ_COUNTER_IDX )
379401 val |= DDRC_PERF_FREERUN_READ_EN ;
@@ -385,27 +407,33 @@ static void cn10k_ddr_perf_counter_enable(struct cn10k_ddr_pmu *pmu,
385407 else
386408 val &= ~DDRC_PERF_FREERUN_WRITE_EN ;
387409 }
388- writeq_relaxed (val , pmu -> base + DDRC_PERF_CNT_FREERUN_EN );
410+ writeq_relaxed (val , pmu -> base +
411+ p_data -> cnt_freerun_en );
389412 }
390413}
391414
392415static u64 cn10k_ddr_perf_read_counter (struct cn10k_ddr_pmu * pmu , int counter )
393416{
417+ const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
394418 u64 val ;
395419
396420 if (counter == DDRC_PERF_READ_COUNTER_IDX )
397- return readq_relaxed (pmu -> base + DDRC_PERF_CNT_VALUE_RD_OP );
421+ return readq_relaxed (pmu -> base +
422+ p_data -> cnt_value_rd_op );
398423
399424 if (counter == DDRC_PERF_WRITE_COUNTER_IDX )
400- return readq_relaxed (pmu -> base + DDRC_PERF_CNT_VALUE_WR_OP );
425+ return readq_relaxed (pmu -> base +
426+ p_data -> cnt_value_wr_op );
401427
402- val = readq_relaxed (pmu -> base + DDRC_PERF_CNT_VALUE (counter ));
428+ val = readq_relaxed (pmu -> base +
429+ DDRC_PERF_REG (p_data -> cnt_base , counter ));
403430 return val ;
404431}
405432
406433static void cn10k_ddr_perf_event_update (struct perf_event * event )
407434{
408435 struct cn10k_ddr_pmu * pmu = to_cn10k_ddr_pmu (event -> pmu );
436+ const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
409437 struct hw_perf_event * hwc = & event -> hw ;
410438 u64 prev_count , new_count , mask ;
411439
@@ -414,7 +442,7 @@ static void cn10k_ddr_perf_event_update(struct perf_event *event)
414442 new_count = cn10k_ddr_perf_read_counter (pmu , hwc -> idx );
415443 } while (local64_xchg (& hwc -> prev_count , new_count ) != prev_count );
416444
417- mask = DDRC_PERF_CNT_MAX_VALUE ;
445+ mask = p_data -> counter_max_val ;
418446
419447 local64_add ((new_count - prev_count ) & mask , & event -> count );
420448}
@@ -435,6 +463,7 @@ static void cn10k_ddr_perf_event_start(struct perf_event *event, int flags)
435463static int cn10k_ddr_perf_event_add (struct perf_event * event , int flags )
436464{
437465 struct cn10k_ddr_pmu * pmu = to_cn10k_ddr_pmu (event -> pmu );
466+ const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
438467 struct hw_perf_event * hwc = & event -> hw ;
439468 u8 config = event -> attr .config ;
440469 int counter , ret ;
@@ -454,7 +483,7 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
454483
455484 if (counter < DDRC_PERF_NUM_GEN_COUNTERS ) {
456485 /* Generic counters, configure event id */
457- reg_offset = DDRC_PERF_CFG (counter );
486+ reg_offset = DDRC_PERF_CFG (p_data -> cfg_base , counter );
458487 ret = ddr_perf_get_event_bitmap (config , & val );
459488 if (ret )
460489 return ret ;
@@ -467,7 +496,7 @@ static int cn10k_ddr_perf_event_add(struct perf_event *event, int flags)
467496 else
468497 val = DDRC_FREERUN_WRITE_CNT_CLR ;
469498
470- writeq_relaxed (val , pmu -> base + DDRC_PERF_CNT_FREERUN_CTRL );
499+ writeq_relaxed (val , pmu -> base + p_data -> cnt_freerun_ctrl );
471500 }
472501
473502 hwc -> state |= PERF_HES_STOPPED ;
@@ -512,17 +541,19 @@ static void cn10k_ddr_perf_event_del(struct perf_event *event, int flags)
512541static void cn10k_ddr_perf_pmu_enable (struct pmu * pmu )
513542{
514543 struct cn10k_ddr_pmu * ddr_pmu = to_cn10k_ddr_pmu (pmu );
544+ const struct ddr_pmu_platform_data * p_data = ddr_pmu -> p_data ;
515545
516546 writeq_relaxed (START_OP_CTRL_VAL_START , ddr_pmu -> base +
517- DDRC_PERF_CNT_START_OP_CTRL );
547+ p_data -> cnt_start_op_ctrl );
518548}
519549
520550static void cn10k_ddr_perf_pmu_disable (struct pmu * pmu )
521551{
522552 struct cn10k_ddr_pmu * ddr_pmu = to_cn10k_ddr_pmu (pmu );
553+ const struct ddr_pmu_platform_data * p_data = ddr_pmu -> p_data ;
523554
524555 writeq_relaxed (END_OP_CTRL_VAL_END , ddr_pmu -> base +
525- DDRC_PERF_CNT_END_OP_CTRL );
556+ p_data -> cnt_end_op_ctrl );
526557}
527558
528559static void cn10k_ddr_perf_event_update_all (struct cn10k_ddr_pmu * pmu )
@@ -549,6 +580,7 @@ static void cn10k_ddr_perf_event_update_all(struct cn10k_ddr_pmu *pmu)
549580
550581static irqreturn_t cn10k_ddr_pmu_overflow_handler (struct cn10k_ddr_pmu * pmu )
551582{
583+ const struct ddr_pmu_platform_data * p_data = pmu -> p_data ;
552584 struct perf_event * event ;
553585 struct hw_perf_event * hwc ;
554586 u64 prev_count , new_count ;
@@ -586,7 +618,7 @@ static irqreturn_t cn10k_ddr_pmu_overflow_handler(struct cn10k_ddr_pmu *pmu)
586618 continue ;
587619
588620 value = cn10k_ddr_perf_read_counter (pmu , i );
589- if (value == DDRC_PERF_CNT_MAX_VALUE ) {
621+ if (value == p_data -> counter_max_val ) {
590622 pr_info ("Counter-(%d) reached max value\n" , i );
591623 cn10k_ddr_perf_event_update_all (pmu );
592624 cn10k_ddr_perf_pmu_disable (& pmu -> pmu );
@@ -629,11 +661,32 @@ static int cn10k_ddr_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
629661 return 0 ;
630662}
631663
#if defined(CONFIG_ACPI) || defined(CONFIG_OF)
/*
 * CN10K register layout, handed to the probe path as OF/ACPI match data.
 *
 * Reuse the DDRC_PERF_CNT_VALUE_* macros for the overflow/max values
 * instead of repeating the raw BIT_ULL/GENMASK_ULL expressions, and use
 * the C99 boolean literal 'true' (TRUE is not defined anywhere in this
 * file and is not a kernel-provided identifier).
 */
static const struct ddr_pmu_platform_data cn10k_ddr_pmu_pdata = {
	.counter_overflow_val = DDRC_PERF_CNT_VALUE_OVERFLOW,
	.counter_max_val = DDRC_PERF_CNT_MAX_VALUE,
	.cnt_base = CN10K_DDRC_PERF_CNT_VALUE_BASE,
	.cfg_base = CN10K_DDRC_PERF_CFG_BASE,
	.cnt_op_mode_ctrl = CN10K_DDRC_PERF_CNT_OP_MODE_CTRL,
	.cnt_start_op_ctrl = CN10K_DDRC_PERF_CNT_START_OP_CTRL,
	.cnt_end_op_ctrl = CN10K_DDRC_PERF_CNT_END_OP_CTRL,
	.cnt_end_status = CN10K_DDRC_PERF_CNT_END_STATUS,
	.cnt_freerun_en = CN10K_DDRC_PERF_CNT_FREERUN_EN,
	.cnt_freerun_ctrl = CN10K_DDRC_PERF_CNT_FREERUN_CTRL,
	.cnt_freerun_clr = 0,	/* CN10K has no separate clear register */
	.cnt_value_wr_op = CN10K_DDRC_PERF_CNT_VALUE_WR_OP,
	.cnt_value_rd_op = CN10K_DDRC_PERF_CNT_VALUE_RD_OP,
	.is_cn10k = true,
};
#endif

682+
632683static int cn10k_ddr_perf_probe (struct platform_device * pdev )
633684{
685+ const struct ddr_pmu_platform_data * dev_data ;
634686 struct cn10k_ddr_pmu * ddr_pmu ;
635687 struct resource * res ;
636688 void __iomem * base ;
689+ bool is_cn10k ;
637690 char * name ;
638691 int ret ;
639692
@@ -644,30 +697,41 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
644697 ddr_pmu -> dev = & pdev -> dev ;
645698 platform_set_drvdata (pdev , ddr_pmu );
646699
700+ dev_data = device_get_match_data (& pdev -> dev );
701+ if (!dev_data ) {
702+ dev_err (& pdev -> dev , "Error: No device match data found\n" );
703+ return - ENODEV ;
704+ }
705+
647706 base = devm_platform_get_and_ioremap_resource (pdev , 0 , & res );
648707 if (IS_ERR (base ))
649708 return PTR_ERR (base );
650709
651710 ddr_pmu -> base = base ;
652711
653- /* Setup the PMU counter to work in manual mode */
654- writeq_relaxed (OP_MODE_CTRL_VAL_MANNUAL , ddr_pmu -> base +
655- DDRC_PERF_CNT_OP_MODE_CTRL );
656-
657- ddr_pmu -> pmu = (struct pmu ) {
658- .module = THIS_MODULE ,
659- .capabilities = PERF_PMU_CAP_NO_EXCLUDE ,
660- .task_ctx_nr = perf_invalid_context ,
661- .attr_groups = cn10k_attr_groups ,
662- .event_init = cn10k_ddr_perf_event_init ,
663- .add = cn10k_ddr_perf_event_add ,
664- .del = cn10k_ddr_perf_event_del ,
665- .start = cn10k_ddr_perf_event_start ,
666- .stop = cn10k_ddr_perf_event_stop ,
667- .read = cn10k_ddr_perf_event_update ,
668- .pmu_enable = cn10k_ddr_perf_pmu_enable ,
669- .pmu_disable = cn10k_ddr_perf_pmu_disable ,
670- };
712+ ddr_pmu -> p_data = dev_data ;
713+ is_cn10k = ddr_pmu -> p_data -> is_cn10k ;
714+
715+ if (is_cn10k ) {
716+ /* Setup the PMU counter to work in manual mode */
717+ writeq_relaxed (OP_MODE_CTRL_VAL_MANUAL , ddr_pmu -> base +
718+ ddr_pmu -> p_data -> cnt_op_mode_ctrl );
719+
720+ ddr_pmu -> pmu = (struct pmu ) {
721+ .module = THIS_MODULE ,
722+ .capabilities = PERF_PMU_CAP_NO_EXCLUDE ,
723+ .task_ctx_nr = perf_invalid_context ,
724+ .attr_groups = cn10k_attr_groups ,
725+ .event_init = cn10k_ddr_perf_event_init ,
726+ .add = cn10k_ddr_perf_event_add ,
727+ .del = cn10k_ddr_perf_event_del ,
728+ .start = cn10k_ddr_perf_event_start ,
729+ .stop = cn10k_ddr_perf_event_stop ,
730+ .read = cn10k_ddr_perf_event_update ,
731+ .pmu_enable = cn10k_ddr_perf_pmu_enable ,
732+ .pmu_disable = cn10k_ddr_perf_pmu_disable ,
733+ };
734+ }
671735
672736 /* Choose this cpu to collect perf data */
673737 ddr_pmu -> cpu = raw_smp_processor_id ();
@@ -688,7 +752,7 @@ static int cn10k_ddr_perf_probe(struct platform_device *pdev)
688752 if (ret )
689753 goto error ;
690754
691- pr_info ("CN10K DDR PMU Driver for ddrc@%llx\n" , res -> start );
755+ pr_info ("DDR PMU Driver for ddrc@%llx\n" , res -> start );
692756 return 0 ;
693757error :
694758 cpuhp_state_remove_instance_nocalls (
@@ -710,15 +774,15 @@ static void cn10k_ddr_perf_remove(struct platform_device *pdev)
710774
#ifdef CONFIG_OF
/* Device-tree match table; .data supplies the CN10K register layout. */
static const struct of_device_id cn10k_ddr_pmu_of_match[] = {
	{ .compatible = "marvell,cn10k-ddr-pmu", .data = &cn10k_ddr_pmu_pdata },
	{ },
};
MODULE_DEVICE_TABLE(of, cn10k_ddr_pmu_of_match);
#endif
718782
#ifdef CONFIG_ACPI
/*
 * ACPI match table; the driver_data field carries the CN10K register
 * layout (cast to kernel_ulong_t, recovered via device_get_match_data()).
 */
static const struct acpi_device_id cn10k_ddr_pmu_acpi_match[] = {
	{"MRVL000A", (kernel_ulong_t)&cn10k_ddr_pmu_pdata},
	{},
};
MODULE_DEVICE_TABLE(acpi, cn10k_ddr_pmu_acpi_match);
0 commit comments