Skip to content

Commit 3d716c5

Browse files
Weili Qian authored and herbertx committed
crypto: hisilicon/qm - mask axi error before memory init
After the device memory is cleared, if the software sends the doorbell operation, the hardware may trigger an axi error when processing the doorbell. This error is caused by memory clearing and hardware access to address 0. Therefore, the axi error is masked during this period. Signed-off-by: Weili Qian <qianweili@huawei.com> Signed-off-by: Chenghai Huang <huangchenghai2@huawei.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent 85acd1b commit 3d716c5

5 files changed

Lines changed: 257 additions & 122 deletions

File tree

drivers/crypto/hisilicon/hpre/hpre_main.c

Lines changed: 66 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
#define HPRE_HAC_RAS_NFE_ENB 0x301414
4040
#define HPRE_HAC_RAS_FE_ENB 0x301418
4141
#define HPRE_HAC_INT_SET 0x301500
42+
#define HPRE_AXI_ERROR_MASK GENMASK(21, 10)
4243
#define HPRE_RNG_TIMEOUT_NUM 0x301A34
4344
#define HPRE_CORE_INT_ENABLE 0
4445
#define HPRE_RDCHN_INI_ST 0x301a00
@@ -798,8 +799,7 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
798799
val1 = readl(qm->io_base + HPRE_AM_OOO_SHUTDOWN_ENB);
799800
if (enable) {
800801
val1 |= HPRE_AM_OOO_SHUTDOWN_ENABLE;
801-
val2 = hisi_qm_get_hw_info(qm, hpre_basic_info,
802-
HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
802+
val2 = qm->err_info.dev_err.shutdown_mask;
803803
} else {
804804
val1 &= ~HPRE_AM_OOO_SHUTDOWN_ENABLE;
805805
val2 = 0x0;
@@ -813,38 +813,33 @@ static void hpre_master_ooo_ctrl(struct hisi_qm *qm, bool enable)
813813

814814
static void hpre_hw_error_disable(struct hisi_qm *qm)
815815
{
816-
u32 ce, nfe;
817-
818-
ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
819-
nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
816+
struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
817+
u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
820818

821819
/* disable hpre hw error interrupts */
822-
writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_INT_MASK);
820+
writel(err_mask, qm->io_base + HPRE_INT_MASK);
823821
/* disable HPRE block master OOO when nfe occurs on Kunpeng930 */
824822
hpre_master_ooo_ctrl(qm, false);
825823
}
826824

827825
static void hpre_hw_error_enable(struct hisi_qm *qm)
828826
{
829-
u32 ce, nfe, err_en;
830-
831-
ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
832-
nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
827+
struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
828+
u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
833829

834830
/* clear HPRE hw error source if having */
835-
writel(ce | nfe | HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_HAC_SOURCE_INT);
831+
writel(err_mask, qm->io_base + HPRE_HAC_SOURCE_INT);
836832

837833
/* configure error type */
838-
writel(ce, qm->io_base + HPRE_RAS_CE_ENB);
839-
writel(nfe, qm->io_base + HPRE_RAS_NFE_ENB);
840-
writel(HPRE_HAC_RAS_FE_ENABLE, qm->io_base + HPRE_RAS_FE_ENB);
834+
writel(dev_err->ce, qm->io_base + HPRE_RAS_CE_ENB);
835+
writel(dev_err->nfe, qm->io_base + HPRE_RAS_NFE_ENB);
836+
writel(dev_err->fe, qm->io_base + HPRE_RAS_FE_ENB);
841837

842838
/* enable HPRE block master OOO when nfe occurs on Kunpeng930 */
843839
hpre_master_ooo_ctrl(qm, true);
844840

845841
/* enable hpre hw error interrupts */
846-
err_en = ce | nfe | HPRE_HAC_RAS_FE_ENABLE;
847-
writel(~err_en, qm->io_base + HPRE_INT_MASK);
842+
writel(~err_mask, qm->io_base + HPRE_INT_MASK);
848843
}
849844

850845
static inline struct hisi_qm *hpre_file_to_qm(struct hpre_debugfs_file *file)
@@ -1399,9 +1394,8 @@ static void hpre_clear_hw_err_status(struct hisi_qm *qm, u32 err_sts)
13991394

14001395
static void hpre_disable_error_report(struct hisi_qm *qm, u32 err_type)
14011396
{
1402-
u32 nfe_mask;
1397+
u32 nfe_mask = qm->err_info.dev_err.nfe;
14031398

1404-
nfe_mask = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
14051399
writel(nfe_mask & (~err_type), qm->io_base + HPRE_RAS_NFE_ENB);
14061400
}
14071401

@@ -1422,11 +1416,11 @@ static enum acc_err_result hpre_get_err_result(struct hisi_qm *qm)
14221416

14231417
err_status = hpre_get_hw_err_status(qm);
14241418
if (err_status) {
1425-
if (err_status & qm->err_info.ecc_2bits_mask)
1419+
if (err_status & qm->err_info.dev_err.ecc_2bits_mask)
14261420
qm->err_status.is_dev_ecc_mbit = true;
14271421
hpre_log_hw_error(qm, err_status);
14281422

1429-
if (err_status & qm->err_info.dev_reset_mask) {
1423+
if (err_status & qm->err_info.dev_err.reset_mask) {
14301424
/* Disable the same error reporting until device is recovered. */
14311425
hpre_disable_error_report(qm, err_status);
14321426
return ACC_ERR_NEED_RESET;
@@ -1442,28 +1436,64 @@ static bool hpre_dev_is_abnormal(struct hisi_qm *qm)
14421436
u32 err_status;
14431437

14441438
err_status = hpre_get_hw_err_status(qm);
1445-
if (err_status & qm->err_info.dev_shutdown_mask)
1439+
if (err_status & qm->err_info.dev_err.shutdown_mask)
14461440
return true;
14471441

14481442
return false;
14491443
}
14501444

1445+
static void hpre_disable_axi_error(struct hisi_qm *qm)
1446+
{
1447+
struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
1448+
u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
1449+
u32 val;
1450+
1451+
val = ~(err_mask & (~HPRE_AXI_ERROR_MASK));
1452+
writel(val, qm->io_base + HPRE_INT_MASK);
1453+
1454+
if (qm->ver > QM_HW_V2)
1455+
writel(dev_err->shutdown_mask & (~HPRE_AXI_ERROR_MASK),
1456+
qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
1457+
}
1458+
1459+
static void hpre_enable_axi_error(struct hisi_qm *qm)
1460+
{
1461+
struct hisi_qm_err_mask *dev_err = &qm->err_info.dev_err;
1462+
u32 err_mask = dev_err->ce | dev_err->nfe | dev_err->fe;
1463+
1464+
/* clear axi error source */
1465+
writel(HPRE_AXI_ERROR_MASK, qm->io_base + HPRE_HAC_SOURCE_INT);
1466+
1467+
writel(~err_mask, qm->io_base + HPRE_INT_MASK);
1468+
1469+
if (qm->ver > QM_HW_V2)
1470+
writel(dev_err->shutdown_mask, qm->io_base + HPRE_OOO_SHUTDOWN_SEL);
1471+
}
1472+
14511473
static void hpre_err_info_init(struct hisi_qm *qm)
14521474
{
14531475
struct hisi_qm_err_info *err_info = &qm->err_info;
1476+
struct hisi_qm_err_mask *qm_err = &err_info->qm_err;
1477+
struct hisi_qm_err_mask *dev_err = &err_info->dev_err;
1478+
1479+
qm_err->fe = HPRE_HAC_RAS_FE_ENABLE;
1480+
qm_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
1481+
qm_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
1482+
qm_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1483+
HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
1484+
qm_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1485+
HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
1486+
qm_err->ecc_2bits_mask = QM_ECC_MBIT;
1487+
1488+
dev_err->fe = HPRE_HAC_RAS_FE_ENABLE;
1489+
dev_err->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_CE_MASK_CAP, qm->cap_ver);
1490+
dev_err->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_NFE_MASK_CAP, qm->cap_ver);
1491+
dev_err->shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1492+
HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
1493+
dev_err->reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1494+
HPRE_RESET_MASK_CAP, qm->cap_ver);
1495+
dev_err->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
14541496

1455-
err_info->fe = HPRE_HAC_RAS_FE_ENABLE;
1456-
err_info->ce = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_CE_MASK_CAP, qm->cap_ver);
1457-
err_info->nfe = hisi_qm_get_hw_info(qm, hpre_basic_info, HPRE_QM_NFE_MASK_CAP, qm->cap_ver);
1458-
err_info->ecc_2bits_mask = HPRE_CORE_ECC_2BIT_ERR | HPRE_OOO_ECC_2BIT_ERR;
1459-
err_info->dev_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1460-
HPRE_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
1461-
err_info->qm_shutdown_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1462-
HPRE_QM_OOO_SHUTDOWN_MASK_CAP, qm->cap_ver);
1463-
err_info->qm_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1464-
HPRE_QM_RESET_MASK_CAP, qm->cap_ver);
1465-
err_info->dev_reset_mask = hisi_qm_get_hw_info(qm, hpre_basic_info,
1466-
HPRE_RESET_MASK_CAP, qm->cap_ver);
14671497
err_info->msi_wr_port = HPRE_WR_MSI_PORT;
14681498
err_info->acpi_rst = "HRST";
14691499
}
@@ -1481,6 +1511,8 @@ static const struct hisi_qm_err_ini hpre_err_ini = {
14811511
.err_info_init = hpre_err_info_init,
14821512
.get_err_result = hpre_get_err_result,
14831513
.dev_is_abnormal = hpre_dev_is_abnormal,
1514+
.disable_axi_error = hpre_disable_axi_error,
1515+
.enable_axi_error = hpre_enable_axi_error,
14841516
};
14851517

14861518
static int hpre_pf_probe_init(struct hpre *hpre)

drivers/crypto/hisilicon/qm.c

Lines changed: 47 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -147,9 +147,9 @@
147147
#define QM_RAS_CE_TIMES_PER_IRQ 1
148148
#define QM_OOO_SHUTDOWN_SEL 0x1040f8
149149
#define QM_AXI_RRESP_ERR BIT(0)
150-
#define QM_ECC_MBIT BIT(2)
151150
#define QM_DB_TIMEOUT BIT(10)
152151
#define QM_OF_FIFO_OF BIT(11)
152+
#define QM_RAS_AXI_ERROR (BIT(0) | BIT(1) | BIT(12))
153153

154154
#define QM_RESET_WAIT_TIMEOUT 400
155155
#define QM_PEH_VENDOR_ID 0x1000d8
@@ -165,7 +165,6 @@
165165
#define ACC_MASTER_TRANS_RETURN 0x300150
166166
#define ACC_MASTER_GLOBAL_CTRL 0x300000
167167
#define ACC_AM_CFG_PORT_WR_EN 0x30001c
168-
#define QM_RAS_NFE_MBIT_DISABLE ~QM_ECC_MBIT
169168
#define ACC_AM_ROB_ECC_INT_STS 0x300104
170169
#define ACC_ROB_ECC_ERR_MULTPL BIT(1)
171170
#define QM_MSI_CAP_ENABLE BIT(16)
@@ -522,7 +521,7 @@ static bool qm_check_dev_error(struct hisi_qm *qm)
522521
return false;
523522

524523
err_status = qm_get_hw_error_status(pf_qm);
525-
if (err_status & pf_qm->err_info.qm_shutdown_mask)
524+
if (err_status & pf_qm->err_info.qm_err.shutdown_mask)
526525
return true;
527526

528527
if (pf_qm->err_ini->dev_is_abnormal)
@@ -1397,17 +1396,17 @@ static void qm_hw_error_init_v1(struct hisi_qm *qm)
13971396

13981397
static void qm_hw_error_cfg(struct hisi_qm *qm)
13991398
{
1400-
struct hisi_qm_err_info *err_info = &qm->err_info;
1399+
struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
14011400

1402-
qm->error_mask = err_info->nfe | err_info->ce | err_info->fe;
1401+
qm->error_mask = qm_err->nfe | qm_err->ce | qm_err->fe;
14031402
/* clear QM hw residual error source */
14041403
writel(qm->error_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
14051404

14061405
/* configure error type */
1407-
writel(err_info->ce, qm->io_base + QM_RAS_CE_ENABLE);
1406+
writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
14081407
writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1409-
writel(err_info->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1410-
writel(err_info->fe, qm->io_base + QM_RAS_FE_ENABLE);
1408+
writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1409+
writel(qm_err->fe, qm->io_base + QM_RAS_FE_ENABLE);
14111410
}
14121411

14131412
static void qm_hw_error_init_v2(struct hisi_qm *qm)
@@ -1436,7 +1435,7 @@ static void qm_hw_error_init_v3(struct hisi_qm *qm)
14361435
qm_hw_error_cfg(qm);
14371436

14381437
/* enable close master ooo when hardware error happened */
1439-
writel(qm->err_info.qm_shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
1438+
writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
14401439

14411440
irq_unmask = ~qm->error_mask;
14421441
irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
@@ -1498,6 +1497,7 @@ static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
14981497

14991498
static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
15001499
{
1500+
struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
15011501
u32 error_status;
15021502

15031503
error_status = qm_get_hw_error_status(qm);
@@ -1506,17 +1506,16 @@ static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
15061506
qm->err_status.is_qm_ecc_mbit = true;
15071507

15081508
qm_log_hw_error(qm, error_status);
1509-
if (error_status & qm->err_info.qm_reset_mask) {
1509+
if (error_status & qm_err->reset_mask) {
15101510
/* Disable the same error reporting until device is recovered. */
1511-
writel(qm->err_info.nfe & (~error_status),
1512-
qm->io_base + QM_RAS_NFE_ENABLE);
1511+
writel(qm_err->nfe & (~error_status), qm->io_base + QM_RAS_NFE_ENABLE);
15131512
return ACC_ERR_NEED_RESET;
15141513
}
15151514

15161515
/* Clear error source if not need reset. */
15171516
writel(error_status, qm->io_base + QM_ABNORMAL_INT_SOURCE);
1518-
writel(qm->err_info.nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1519-
writel(qm->err_info.ce, qm->io_base + QM_RAS_CE_ENABLE);
1517+
writel(qm_err->nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1518+
writel(qm_err->ce, qm->io_base + QM_RAS_CE_ENABLE);
15201519
}
15211520

15221521
return ACC_ERR_RECOVERED;
@@ -4227,9 +4226,9 @@ static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
42274226
!qm->err_status.is_qm_ecc_mbit &&
42284227
!qm->err_ini->close_axi_master_ooo) {
42294228
nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
4230-
writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
4229+
writel(nfe_enb & ~qm->err_info.qm_err.ecc_2bits_mask,
42314230
qm->io_base + QM_RAS_NFE_ENABLE);
4232-
writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
4231+
writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SET);
42334232
}
42344233
}
42354234

@@ -4508,12 +4507,12 @@ static void qm_restart_prepare(struct hisi_qm *qm)
45084507
qm->io_base + ACC_AM_CFG_PORT_WR_EN);
45094508

45104509
/* clear dev ecc 2bit error source if having */
4511-
value = qm_get_dev_err_status(qm) & qm->err_info.ecc_2bits_mask;
4510+
value = qm_get_dev_err_status(qm) & qm->err_info.dev_err.ecc_2bits_mask;
45124511
if (value && qm->err_ini->clear_dev_hw_err_status)
45134512
qm->err_ini->clear_dev_hw_err_status(qm, value);
45144513

45154514
/* clear QM ecc mbit error source */
4516-
writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
4515+
writel(qm->err_info.qm_err.ecc_2bits_mask, qm->io_base + QM_ABNORMAL_INT_SOURCE);
45174516

45184517
/* clear AM Reorder Buffer ecc mbit source */
45194518
writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
@@ -4540,6 +4539,34 @@ static void qm_restart_done(struct hisi_qm *qm)
45404539
qm->err_status.is_dev_ecc_mbit = false;
45414540
}
45424541

4542+
static void qm_disable_axi_error(struct hisi_qm *qm)
4543+
{
4544+
struct hisi_qm_err_mask *qm_err = &qm->err_info.qm_err;
4545+
u32 val;
4546+
4547+
val = ~(qm->error_mask & (~QM_RAS_AXI_ERROR));
4548+
writel(val, qm->io_base + QM_ABNORMAL_INT_MASK);
4549+
if (qm->ver > QM_HW_V2)
4550+
writel(qm_err->shutdown_mask & (~QM_RAS_AXI_ERROR),
4551+
qm->io_base + QM_OOO_SHUTDOWN_SEL);
4552+
4553+
if (qm->err_ini->disable_axi_error)
4554+
qm->err_ini->disable_axi_error(qm);
4555+
}
4556+
4557+
static void qm_enable_axi_error(struct hisi_qm *qm)
4558+
{
4559+
/* clear axi error source */
4560+
writel(QM_RAS_AXI_ERROR, qm->io_base + QM_ABNORMAL_INT_SOURCE);
4561+
4562+
writel(~qm->error_mask, qm->io_base + QM_ABNORMAL_INT_MASK);
4563+
if (qm->ver > QM_HW_V2)
4564+
writel(qm->err_info.qm_err.shutdown_mask, qm->io_base + QM_OOO_SHUTDOWN_SEL);
4565+
4566+
if (qm->err_ini->enable_axi_error)
4567+
qm->err_ini->enable_axi_error(qm);
4568+
}
4569+
45434570
static int qm_controller_reset_done(struct hisi_qm *qm)
45444571
{
45454572
struct pci_dev *pdev = qm->pdev;
@@ -4573,6 +4600,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
45734600

45744601
qm_restart_prepare(qm);
45754602
hisi_qm_dev_err_init(qm);
4603+
qm_disable_axi_error(qm);
45764604
if (qm->err_ini->open_axi_master_ooo)
45774605
qm->err_ini->open_axi_master_ooo(qm);
45784606

@@ -4595,7 +4623,7 @@ static int qm_controller_reset_done(struct hisi_qm *qm)
45954623
ret = qm_wait_vf_prepare_finish(qm);
45964624
if (ret)
45974625
pci_err(pdev, "failed to start by vfs in soft reset!\n");
4598-
4626+
qm_enable_axi_error(qm);
45994627
qm_cmd_init(qm);
46004628
qm_restart_done(qm);
46014629

0 commit comments

Comments
 (0)