|
8 | 8 | #include "tailcall_freplace.skel.h" |
9 | 9 | #include "tc_bpf2bpf.skel.h" |
10 | 10 | #include "tailcall_fail.skel.h" |
| 11 | +#include "tailcall_sleepable.skel.h" |
11 | 12 |
|
12 | 13 | /* test_tailcall_1 checks basic functionality by patching multiple locations |
13 | 14 | * in a single program for a single tail call slot with nop->jmp, jmp->nop |
@@ -1653,6 +1654,77 @@ static void test_tailcall_failure() |
1653 | 1654 | RUN_TESTS(tailcall_fail); |
1654 | 1655 | } |
1655 | 1656 |
|
/* User-space attach target for the sleepable-tailcall test below:
 * uprobe_sleepable_1 is attached to this symbol and fires when it is
 * called.  noinline keeps a resolvable symbol for uprobe attachment;
 * the empty asm prevents the compiler from eliminating the empty body.
 */
noinline void uprobe_sleepable_trigger(void)
{
	asm volatile ("");
}
| 1661 | + |
| 1662 | +static void test_tailcall_sleepable(void) |
| 1663 | +{ |
| 1664 | + LIBBPF_OPTS(bpf_uprobe_opts, opts); |
| 1665 | + struct tailcall_sleepable *skel; |
| 1666 | + int prog_fd, map_fd; |
| 1667 | + int err, key; |
| 1668 | + |
| 1669 | + skel = tailcall_sleepable__open(); |
| 1670 | + if (!ASSERT_OK_PTR(skel, "tailcall_sleepable__open")) |
| 1671 | + return; |
| 1672 | + |
| 1673 | + /* |
| 1674 | + * Test that we can't load uprobe_normal and uprobe_sleepable_1, |
| 1675 | + * because they share tailcall map. |
| 1676 | + */ |
| 1677 | + bpf_program__set_autoload(skel->progs.uprobe_normal, true); |
| 1678 | + bpf_program__set_autoload(skel->progs.uprobe_sleepable_1, true); |
| 1679 | + |
| 1680 | + err = tailcall_sleepable__load(skel); |
| 1681 | + if (!ASSERT_ERR(err, "tailcall_sleepable__load")) |
| 1682 | + goto out; |
| 1683 | + |
| 1684 | + tailcall_sleepable__destroy(skel); |
| 1685 | + |
| 1686 | + /* |
| 1687 | + * Test that we can tail call from sleepable to sleepable program. |
| 1688 | + */ |
| 1689 | + skel = tailcall_sleepable__open(); |
| 1690 | + if (!ASSERT_OK_PTR(skel, "tailcall_sleepable__open")) |
| 1691 | + return; |
| 1692 | + |
| 1693 | + bpf_program__set_autoload(skel->progs.uprobe_sleepable_1, true); |
| 1694 | + bpf_program__set_autoload(skel->progs.uprobe_sleepable_2, true); |
| 1695 | + |
| 1696 | + err = tailcall_sleepable__load(skel); |
| 1697 | + if (!ASSERT_OK(err, "tailcall_sleepable__load")) |
| 1698 | + goto out; |
| 1699 | + |
| 1700 | + /* Add sleepable uprobe_sleepable_2 to jmp_table[0]. */ |
| 1701 | + key = 0; |
| 1702 | + prog_fd = bpf_program__fd(skel->progs.uprobe_sleepable_2); |
| 1703 | + map_fd = bpf_map__fd(skel->maps.jmp_table); |
| 1704 | + err = bpf_map_update_elem(map_fd, &key, &prog_fd, BPF_ANY); |
| 1705 | + if (!ASSERT_OK(err, "update jmp_table")) |
| 1706 | + goto out; |
| 1707 | + |
| 1708 | + skel->bss->my_pid = getpid(); |
| 1709 | + |
| 1710 | + /* Attach uprobe_sleepable_1 to uprobe_sleepable_trigger and hit it. */ |
| 1711 | + opts.func_name = "uprobe_sleepable_trigger"; |
| 1712 | + skel->links.uprobe_sleepable_1 = bpf_program__attach_uprobe_opts( |
| 1713 | + skel->progs.uprobe_sleepable_1, |
| 1714 | + -1, |
| 1715 | + "/proc/self/exe", |
| 1716 | + 0 /* offset */, |
| 1717 | + &opts); |
| 1718 | + if (!ASSERT_OK_PTR(skel->links.uprobe_sleepable_1, "bpf_program__attach_uprobe_opts")) |
| 1719 | + goto out; |
| 1720 | + |
| 1721 | + uprobe_sleepable_trigger(); |
| 1722 | + ASSERT_EQ(skel->bss->executed, 1, "executed"); |
| 1723 | + |
| 1724 | +out: |
| 1725 | + tailcall_sleepable__destroy(skel); |
| 1726 | +} |
| 1727 | + |
1656 | 1728 | void test_tailcalls(void) |
1657 | 1729 | { |
1658 | 1730 | if (test__start_subtest("tailcall_1")) |
@@ -1707,4 +1779,6 @@ void test_tailcalls(void) |
1707 | 1779 | test_tailcall_bpf2bpf_freplace(); |
1708 | 1780 | if (test__start_subtest("tailcall_failure")) |
1709 | 1781 | test_tailcall_failure(); |
| 1782 | + if (test__start_subtest("tailcall_sleepable")) |
| 1783 | + test_tailcall_sleepable(); |
1710 | 1784 | } |
0 commit comments