|
39 | 39 | #define COMMAND_COPY BIT(5) |
40 | 40 | #define COMMAND_ENABLE_DOORBELL BIT(6) |
41 | 41 | #define COMMAND_DISABLE_DOORBELL BIT(7) |
| 42 | +#define COMMAND_BAR_SUBRANGE_SETUP BIT(8) |
| 43 | +#define COMMAND_BAR_SUBRANGE_CLEAR BIT(9) |
42 | 44 |
|
43 | 45 | #define PCI_ENDPOINT_TEST_STATUS 0x8 |
44 | 46 | #define STATUS_READ_SUCCESS BIT(0) |
|
55 | 57 | #define STATUS_DOORBELL_ENABLE_FAIL BIT(11) |
56 | 58 | #define STATUS_DOORBELL_DISABLE_SUCCESS BIT(12) |
57 | 59 | #define STATUS_DOORBELL_DISABLE_FAIL BIT(13) |
| 60 | +#define STATUS_BAR_SUBRANGE_SETUP_SUCCESS BIT(14) |
| 61 | +#define STATUS_BAR_SUBRANGE_SETUP_FAIL BIT(15) |
| 62 | +#define STATUS_BAR_SUBRANGE_CLEAR_SUCCESS BIT(16) |
| 63 | +#define STATUS_BAR_SUBRANGE_CLEAR_FAIL BIT(17) |
58 | 64 |
|
59 | 65 | #define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c |
60 | 66 | #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10 |
|
77 | 83 | #define CAP_MSI BIT(1) |
78 | 84 | #define CAP_MSIX BIT(2) |
79 | 85 | #define CAP_INTX BIT(3) |
| 86 | +#define CAP_SUBRANGE_MAPPING BIT(4) |
80 | 87 |
|
81 | 88 | #define PCI_ENDPOINT_TEST_DB_BAR 0x34 |
82 | 89 | #define PCI_ENDPOINT_TEST_DB_OFFSET 0x38 |
|
100 | 107 |
|
101 | 108 | #define PCI_DEVICE_ID_ROCKCHIP_RK3588 0x3588 |
102 | 109 |
|
| 110 | +#define PCI_ENDPOINT_TEST_BAR_SUBRANGE_NSUB 2 |
| 111 | + |
103 | 112 | static DEFINE_IDA(pci_endpoint_test_ida); |
104 | 113 |
|
105 | 114 | #define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \ |
@@ -414,6 +423,193 @@ static int pci_endpoint_test_bars(struct pci_endpoint_test *test) |
414 | 423 | return 0; |
415 | 424 | } |
416 | 425 |
|
| 426 | +static u8 pci_endpoint_test_subrange_sig_byte(enum pci_barno barno, |
| 427 | + unsigned int subno) |
| 428 | +{ |
| 429 | + return 0x50 + (barno * 8) + subno; |
| 430 | +} |
| 431 | + |
| 432 | +static u8 pci_endpoint_test_subrange_test_byte(enum pci_barno barno, |
| 433 | + unsigned int subno) |
| 434 | +{ |
| 435 | + return 0xa0 + (barno * 8) + subno; |
| 436 | +} |
| 437 | + |
/*
 * Issue a BAR-subrange command (@command) to the endpoint for @barno and
 * wait for the completion interrupt, then check the STATUS register against
 * @ok_bit / @fail_bit.
 *
 * The write sequence below is a protocol: STATUS is cleared first, IRQ
 * parameters and the BAR number are staged, and writing COMMAND last is
 * what triggers the endpoint to act. Do not reorder these writes.
 *
 * Returns 0 on success, -EINVAL for a bad IRQ type, -ETIMEDOUT if no
 * completion interrupt arrives within 1s, or -EIO if the endpoint reports
 * failure (or fails to report success).
 */
static int pci_endpoint_test_bar_subrange_cmd(struct pci_endpoint_test *test,
					      enum pci_barno barno, u32 command,
					      u32 ok_bit, u32 fail_bit)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int irq_type = test->irq_type;
	u32 status;

	if (irq_type < PCITEST_IRQ_TYPE_INTX ||
	    irq_type > PCITEST_IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type\n");
		return -EINVAL;
	}

	/* Arm the completion before triggering the command to avoid a race. */
	reinit_completion(&test->irq_raised);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	/* Reuse SIZE as a command parameter: bar number. */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, barno);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND, command);

	if (!wait_for_completion_timeout(&test->irq_raised,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	status = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	/* An explicit failure report takes precedence over a missing OK bit. */
	if (status & fail_bit)
		return -EIO;

	if (!(status & ok_bit))
		return -EIO;

	return 0;
}
| 475 | + |
| 476 | +static int pci_endpoint_test_bar_subrange_setup(struct pci_endpoint_test *test, |
| 477 | + enum pci_barno barno) |
| 478 | +{ |
| 479 | + return pci_endpoint_test_bar_subrange_cmd(test, barno, |
| 480 | + COMMAND_BAR_SUBRANGE_SETUP, |
| 481 | + STATUS_BAR_SUBRANGE_SETUP_SUCCESS, |
| 482 | + STATUS_BAR_SUBRANGE_SETUP_FAIL); |
| 483 | +} |
| 484 | + |
| 485 | +static int pci_endpoint_test_bar_subrange_clear(struct pci_endpoint_test *test, |
| 486 | + enum pci_barno barno) |
| 487 | +{ |
| 488 | + return pci_endpoint_test_bar_subrange_cmd(test, barno, |
| 489 | + COMMAND_BAR_SUBRANGE_CLEAR, |
| 490 | + STATUS_BAR_SUBRANGE_CLEAR_SUCCESS, |
| 491 | + STATUS_BAR_SUBRANGE_CLEAR_FAIL); |
| 492 | +} |
| 493 | + |
| 494 | +static int pci_endpoint_test_bar_subrange(struct pci_endpoint_test *test, |
| 495 | + enum pci_barno barno) |
| 496 | +{ |
| 497 | + u32 nsub = PCI_ENDPOINT_TEST_BAR_SUBRANGE_NSUB; |
| 498 | + struct device *dev = &test->pdev->dev; |
| 499 | + size_t sub_size, buf_size; |
| 500 | + resource_size_t bar_size; |
| 501 | + void __iomem *bar_addr; |
| 502 | + void *read_buf = NULL; |
| 503 | + int ret, clear_ret; |
| 504 | + size_t off, chunk; |
| 505 | + u32 i, exp, val; |
| 506 | + u8 pattern; |
| 507 | + |
| 508 | + if (!(test->ep_caps & CAP_SUBRANGE_MAPPING)) |
| 509 | + return -EOPNOTSUPP; |
| 510 | + |
| 511 | + /* |
| 512 | + * The test register BAR is not safe to reprogram and write/read |
| 513 | + * over its full size. BAR_TEST already special-cases it to a tiny |
| 514 | + * range. For subrange mapping tests, let's simply skip it. |
| 515 | + */ |
| 516 | + if (barno == test->test_reg_bar) |
| 517 | + return -EBUSY; |
| 518 | + |
| 519 | + bar_size = pci_resource_len(test->pdev, barno); |
| 520 | + if (!bar_size) |
| 521 | + return -ENODATA; |
| 522 | + |
| 523 | + bar_addr = test->bar[barno]; |
| 524 | + if (!bar_addr) |
| 525 | + return -ENOMEM; |
| 526 | + |
| 527 | + ret = pci_endpoint_test_bar_subrange_setup(test, barno); |
| 528 | + if (ret) |
| 529 | + return ret; |
| 530 | + |
| 531 | + if (bar_size % nsub || bar_size / nsub > SIZE_MAX) { |
| 532 | + ret = -EINVAL; |
| 533 | + goto out_clear; |
| 534 | + } |
| 535 | + |
| 536 | + sub_size = bar_size / nsub; |
| 537 | + if (sub_size < sizeof(u32)) { |
| 538 | + ret = -ENOSPC; |
| 539 | + goto out_clear; |
| 540 | + } |
| 541 | + |
| 542 | + /* Limit the temporary buffer size */ |
| 543 | + buf_size = min_t(size_t, sub_size, SZ_1M); |
| 544 | + |
| 545 | + read_buf = kmalloc(buf_size, GFP_KERNEL); |
| 546 | + if (!read_buf) { |
| 547 | + ret = -ENOMEM; |
| 548 | + goto out_clear; |
| 549 | + } |
| 550 | + |
| 551 | + /* |
| 552 | + * Step 1: verify EP-provided signature per subrange. This detects |
| 553 | + * whether the EP actually applied the submap order. |
| 554 | + */ |
| 555 | + for (i = 0; i < nsub; i++) { |
| 556 | + exp = (u32)pci_endpoint_test_subrange_sig_byte(barno, i) * |
| 557 | + 0x01010101U; |
| 558 | + val = ioread32(bar_addr + (i * sub_size)); |
| 559 | + if (val != exp) { |
| 560 | + dev_err(dev, |
| 561 | + "BAR%d subrange%u signature mismatch @%#zx: exp %#08x got %#08x\n", |
| 562 | + barno, i, (size_t)i * sub_size, exp, val); |
| 563 | + ret = -EIO; |
| 564 | + goto out_clear; |
| 565 | + } |
| 566 | + val = ioread32(bar_addr + (i * sub_size) + sub_size - sizeof(u32)); |
| 567 | + if (val != exp) { |
| 568 | + dev_err(dev, |
| 569 | + "BAR%d subrange%u signature mismatch @%#zx: exp %#08x got %#08x\n", |
| 570 | + barno, i, |
| 571 | + ((size_t)i * sub_size) + sub_size - sizeof(u32), |
| 572 | + exp, val); |
| 573 | + ret = -EIO; |
| 574 | + goto out_clear; |
| 575 | + } |
| 576 | + } |
| 577 | + |
| 578 | + /* Step 2: write unique pattern per subrange (write all first). */ |
| 579 | + for (i = 0; i < nsub; i++) { |
| 580 | + pattern = pci_endpoint_test_subrange_test_byte(barno, i); |
| 581 | + memset_io(bar_addr + (i * sub_size), pattern, sub_size); |
| 582 | + } |
| 583 | + |
| 584 | + /* Step 3: read back and verify (read all after all writes). */ |
| 585 | + for (i = 0; i < nsub; i++) { |
| 586 | + pattern = pci_endpoint_test_subrange_test_byte(barno, i); |
| 587 | + for (off = 0; off < sub_size; off += chunk) { |
| 588 | + void *bad; |
| 589 | + |
| 590 | + chunk = min_t(size_t, buf_size, sub_size - off); |
| 591 | + memcpy_fromio(read_buf, bar_addr + (i * sub_size) + off, |
| 592 | + chunk); |
| 593 | + bad = memchr_inv(read_buf, pattern, chunk); |
| 594 | + if (bad) { |
| 595 | + size_t bad_off = (u8 *)bad - (u8 *)read_buf; |
| 596 | + |
| 597 | + dev_err(dev, |
| 598 | + "BAR%d subrange%u data mismatch @%#zx (pattern %#02x)\n", |
| 599 | + barno, i, (size_t)i * sub_size + off + bad_off, |
| 600 | + pattern); |
| 601 | + ret = -EIO; |
| 602 | + goto out_clear; |
| 603 | + } |
| 604 | + } |
| 605 | + } |
| 606 | + |
| 607 | +out_clear: |
| 608 | + kfree(read_buf); |
| 609 | + clear_ret = pci_endpoint_test_bar_subrange_clear(test, barno); |
| 610 | + return ret ?: clear_ret; |
| 611 | +} |
| 612 | + |
417 | 613 | static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test) |
418 | 614 | { |
419 | 615 | u32 val; |
@@ -936,12 +1132,17 @@ static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd, |
936 | 1132 |
|
937 | 1133 | switch (cmd) { |
938 | 1134 | case PCITEST_BAR: |
| 1135 | + case PCITEST_BAR_SUBRANGE: |
939 | 1136 | bar = arg; |
940 | 1137 | if (bar <= NO_BAR || bar > BAR_5) |
941 | 1138 | goto ret; |
942 | 1139 | if (is_am654_pci_dev(pdev) && bar == BAR_0) |
943 | 1140 | goto ret; |
944 | | - ret = pci_endpoint_test_bar(test, bar); |
| 1141 | + |
| 1142 | + if (cmd == PCITEST_BAR) |
| 1143 | + ret = pci_endpoint_test_bar(test, bar); |
| 1144 | + else |
| 1145 | + ret = pci_endpoint_test_bar_subrange(test, bar); |
945 | 1146 | break; |
946 | 1147 | case PCITEST_BARS: |
947 | 1148 | ret = pci_endpoint_test_bars(test); |
|
0 commit comments