Skip to content

Commit bebfbf0

Browse files
committed
Daniel Borkmann says: ==================== pull-request: bpf-next 2023-08-25 We've added 87 non-merge commits during the last 8 day(s) which contain a total of 104 files changed, 3719 insertions(+), 4212 deletions(-). The main changes are: 1) Add multi uprobe BPF links for attaching multiple uprobes and usdt probes, which is significantly faster and saves extra fds, from Jiri Olsa. 2) Add support for BPF cpu v4 instructions for arm64 JIT compiler, from Xu Kuohai. 3) Add support for BPF cpu v4 instructions for riscv64 JIT compiler, from Pu Lehui. 4) Fix LWT BPF xmit hooks wrt their return values where propagating the result from skb_do_redirect() would trigger a use-after-free, from Yan Zhai. 5) Fix a BPF verifier issue related to bpf_kptr_xchg() with local kptr where the map's value kptr type and locally allocated obj type mismatch, from Yonghong Song. 6) Fix BPF verifier's check_func_arg_reg_off() function wrt graph root/node which bypassed reg->off == 0 enforcement, from Kumar Kartikeya Dwivedi. 7) Lift BPF verifier restriction in networking BPF programs to treat comparison of packet pointers not as a pointer leak, from Yafang Shao. 8) Remove unmaintained XDP BPF samples as they are maintained in xdp-tools repository out of tree, from Toke Høiland-Jørgensen. 9) Batch of fixes for the tracing programs from BPF samples in order to make them more libbpf-aware, from Daniel T. Lee. 10) Fix a libbpf signedness determination bug in the CO-RE relocation handling logic, from Andrii Nakryiko. 11) Extend libbpf to support CO-RE kfunc relocations. Also follow-up fixes for bpf_refcount shared ownership implementation, both from Dave Marchevsky. 12) Add a new bpf_object__unpin() API function to libbpf, from Daniel Xu. 13) Fix a memory leak in libbpf to also free btf_vmlinux when the bpf_object gets closed, from Hao Luo. 14) Small error output improvements to test_bpf module, from Helge Deller. 
* tag 'for-netdev' of https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next: (87 commits) selftests/bpf: Add tests for rbtree API interaction in sleepable progs bpf: Allow bpf_spin_{lock,unlock} in sleepable progs bpf: Consider non-owning refs to refcounted nodes RCU protected bpf: Reenable bpf_refcount_acquire bpf: Use bpf_mem_free_rcu when bpf_obj_dropping refcounted nodes bpf: Consider non-owning refs trusted bpf: Ensure kptr_struct_meta is non-NULL for collection insert and refcount_acquire selftests/bpf: Enable cpu v4 tests for RV64 riscv, bpf: Support unconditional bswap insn riscv, bpf: Support signed div/mod insns riscv, bpf: Support 32-bit offset jmp insn riscv, bpf: Support sign-extension mov insns riscv, bpf: Support sign-extension load insns riscv, bpf: Fix missing exception handling and redundant zext for LDX_B/H/W samples/bpf: Add note to README about the XDP utilities moved to xdp-tools samples/bpf: Cleanup .gitignore samples/bpf: Remove the xdp_sample_pkts utility samples/bpf: Remove the xdp1 and xdp2 utilities samples/bpf: Remove the xdp_rxq_info utility samples/bpf: Remove the xdp_redirect* utilities ... ==================== Link: https://lore.kernel.org/r/20230825194319.12727-1-daniel@iogearbox.net Signed-off-by: Jakub Kicinski <kuba@kernel.org>
2 parents 1fa6ffa + ec0ded2 commit bebfbf0

104 files changed

Lines changed: 3719 additions & 4212 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

arch/arm64/include/asm/insn.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -186,6 +186,8 @@ enum aarch64_insn_ldst_type {
186186
AARCH64_INSN_LDST_LOAD_ACQ_EX,
187187
AARCH64_INSN_LDST_STORE_EX,
188188
AARCH64_INSN_LDST_STORE_REL_EX,
189+
AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET,
190+
AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET,
189191
};
190192

191193
enum aarch64_insn_adsb_type {
@@ -324,6 +326,7 @@ __AARCH64_INSN_FUNCS(prfm, 0x3FC00000, 0x39800000)
324326
__AARCH64_INSN_FUNCS(prfm_lit, 0xFF000000, 0xD8000000)
325327
__AARCH64_INSN_FUNCS(store_imm, 0x3FC00000, 0x39000000)
326328
__AARCH64_INSN_FUNCS(load_imm, 0x3FC00000, 0x39400000)
329+
__AARCH64_INSN_FUNCS(signed_load_imm, 0X3FC00000, 0x39800000)
327330
__AARCH64_INSN_FUNCS(store_pre, 0x3FE00C00, 0x38000C00)
328331
__AARCH64_INSN_FUNCS(load_pre, 0x3FE00C00, 0x38400C00)
329332
__AARCH64_INSN_FUNCS(store_post, 0x3FE00C00, 0x38000400)
@@ -337,6 +340,7 @@ __AARCH64_INSN_FUNCS(ldset, 0x3F20FC00, 0x38203000)
337340
__AARCH64_INSN_FUNCS(swp, 0x3F20FC00, 0x38208000)
338341
__AARCH64_INSN_FUNCS(cas, 0x3FA07C00, 0x08A07C00)
339342
__AARCH64_INSN_FUNCS(ldr_reg, 0x3FE0EC00, 0x38606800)
343+
__AARCH64_INSN_FUNCS(signed_ldr_reg, 0X3FE0FC00, 0x38A0E800)
340344
__AARCH64_INSN_FUNCS(ldr_imm, 0x3FC00000, 0x39400000)
341345
__AARCH64_INSN_FUNCS(ldr_lit, 0xBF000000, 0x18000000)
342346
__AARCH64_INSN_FUNCS(ldrsw_lit, 0xFF000000, 0x98000000)

arch/arm64/lib/insn.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -385,6 +385,9 @@ u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
385385
case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
386386
insn = aarch64_insn_get_ldr_reg_value();
387387
break;
388+
case AARCH64_INSN_LDST_SIGNED_LOAD_REG_OFFSET:
389+
insn = aarch64_insn_get_signed_ldr_reg_value();
390+
break;
388391
case AARCH64_INSN_LDST_STORE_REG_OFFSET:
389392
insn = aarch64_insn_get_str_reg_value();
390393
break;
@@ -430,6 +433,9 @@ u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
430433
case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
431434
insn = aarch64_insn_get_ldr_imm_value();
432435
break;
436+
case AARCH64_INSN_LDST_SIGNED_LOAD_IMM_OFFSET:
437+
insn = aarch64_insn_get_signed_load_imm_value();
438+
break;
433439
case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
434440
insn = aarch64_insn_get_str_imm_value();
435441
break;

arch/arm64/net/bpf_jit.h

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -59,10 +59,13 @@
5959
AARCH64_INSN_LDST_##type##_REG_OFFSET)
6060
#define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE)
6161
#define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
62+
#define A64_LDRSB(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 8, SIGNED_LOAD)
6263
#define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE)
6364
#define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
65+
#define A64_LDRSH(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 16, SIGNED_LOAD)
6466
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
6567
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
68+
#define A64_LDRSW(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 32, SIGNED_LOAD)
6669
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
6770
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)
6871

@@ -73,10 +76,13 @@
7376
AARCH64_INSN_LDST_##type##_IMM_OFFSET)
7477
#define A64_STRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, STORE)
7578
#define A64_LDRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, LOAD)
79+
#define A64_LDRSBI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 8, SIGNED_LOAD)
7680
#define A64_STRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, STORE)
7781
#define A64_LDRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, LOAD)
82+
#define A64_LDRSHI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 16, SIGNED_LOAD)
7883
#define A64_STR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, STORE)
7984
#define A64_LDR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, LOAD)
85+
#define A64_LDRSWI(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 32, SIGNED_LOAD)
8086
#define A64_STR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, STORE)
8187
#define A64_LDR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, LOAD)
8288

@@ -186,6 +192,11 @@
186192
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
187193
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)
188194

195+
/* Sign extend */
196+
#define A64_SXTB(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 7)
197+
#define A64_SXTH(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 15)
198+
#define A64_SXTW(sf, Rd, Rn) A64_SBFM(sf, Rd, Rn, 0, 31)
199+
189200
/* Move wide (immediate) */
190201
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
191202
aarch64_insn_gen_movewide(Rd, imm16, shift, \
@@ -223,6 +234,7 @@
223234
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
224235
A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
225236
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
237+
#define A64_SDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, SDIV)
226238
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
227239
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
228240
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)

arch/arm64/net/bpf_jit_comp.c

Lines changed: 75 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -715,7 +715,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
715715
/* First pass */
716716
return 0;
717717

718-
if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
718+
if (BPF_MODE(insn->code) != BPF_PROBE_MEM &&
719+
BPF_MODE(insn->code) != BPF_PROBE_MEMSX)
719720
return 0;
720721

721722
if (!ctx->prog->aux->extable ||
@@ -779,12 +780,26 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
779780
u8 dst_adj;
780781
int off_adj;
781782
int ret;
783+
bool sign_extend;
782784

783785
switch (code) {
784786
/* dst = src */
785787
case BPF_ALU | BPF_MOV | BPF_X:
786788
case BPF_ALU64 | BPF_MOV | BPF_X:
787-
emit(A64_MOV(is64, dst, src), ctx);
789+
switch (insn->off) {
790+
case 0:
791+
emit(A64_MOV(is64, dst, src), ctx);
792+
break;
793+
case 8:
794+
emit(A64_SXTB(is64, dst, src), ctx);
795+
break;
796+
case 16:
797+
emit(A64_SXTH(is64, dst, src), ctx);
798+
break;
799+
case 32:
800+
emit(A64_SXTW(is64, dst, src), ctx);
801+
break;
802+
}
788803
break;
789804
/* dst = dst OP src */
790805
case BPF_ALU | BPF_ADD | BPF_X:
@@ -813,11 +828,17 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
813828
break;
814829
case BPF_ALU | BPF_DIV | BPF_X:
815830
case BPF_ALU64 | BPF_DIV | BPF_X:
816-
emit(A64_UDIV(is64, dst, dst, src), ctx);
831+
if (!off)
832+
emit(A64_UDIV(is64, dst, dst, src), ctx);
833+
else
834+
emit(A64_SDIV(is64, dst, dst, src), ctx);
817835
break;
818836
case BPF_ALU | BPF_MOD | BPF_X:
819837
case BPF_ALU64 | BPF_MOD | BPF_X:
820-
emit(A64_UDIV(is64, tmp, dst, src), ctx);
838+
if (!off)
839+
emit(A64_UDIV(is64, tmp, dst, src), ctx);
840+
else
841+
emit(A64_SDIV(is64, tmp, dst, src), ctx);
821842
emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
822843
break;
823844
case BPF_ALU | BPF_LSH | BPF_X:
@@ -840,11 +861,12 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
840861
/* dst = BSWAP##imm(dst) */
841862
case BPF_ALU | BPF_END | BPF_FROM_LE:
842863
case BPF_ALU | BPF_END | BPF_FROM_BE:
864+
case BPF_ALU64 | BPF_END | BPF_FROM_LE:
843865
#ifdef CONFIG_CPU_BIG_ENDIAN
844-
if (BPF_SRC(code) == BPF_FROM_BE)
866+
if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_BE)
845867
goto emit_bswap_uxt;
846868
#else /* !CONFIG_CPU_BIG_ENDIAN */
847-
if (BPF_SRC(code) == BPF_FROM_LE)
869+
if (BPF_CLASS(code) == BPF_ALU && BPF_SRC(code) == BPF_FROM_LE)
848870
goto emit_bswap_uxt;
849871
#endif
850872
switch (imm) {
@@ -943,12 +965,18 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
943965
case BPF_ALU | BPF_DIV | BPF_K:
944966
case BPF_ALU64 | BPF_DIV | BPF_K:
945967
emit_a64_mov_i(is64, tmp, imm, ctx);
946-
emit(A64_UDIV(is64, dst, dst, tmp), ctx);
968+
if (!off)
969+
emit(A64_UDIV(is64, dst, dst, tmp), ctx);
970+
else
971+
emit(A64_SDIV(is64, dst, dst, tmp), ctx);
947972
break;
948973
case BPF_ALU | BPF_MOD | BPF_K:
949974
case BPF_ALU64 | BPF_MOD | BPF_K:
950975
emit_a64_mov_i(is64, tmp2, imm, ctx);
951-
emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
976+
if (!off)
977+
emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
978+
else
979+
emit(A64_SDIV(is64, tmp, dst, tmp2), ctx);
952980
emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
953981
break;
954982
case BPF_ALU | BPF_LSH | BPF_K:
@@ -966,7 +994,11 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
966994

967995
/* JUMP off */
968996
case BPF_JMP | BPF_JA:
969-
jmp_offset = bpf2a64_offset(i, off, ctx);
997+
case BPF_JMP32 | BPF_JA:
998+
if (BPF_CLASS(code) == BPF_JMP)
999+
jmp_offset = bpf2a64_offset(i, off, ctx);
1000+
else
1001+
jmp_offset = bpf2a64_offset(i, imm, ctx);
9701002
check_imm26(jmp_offset);
9711003
emit(A64_B(jmp_offset), ctx);
9721004
break;
@@ -1122,7 +1154,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
11221154
return 1;
11231155
}
11241156

1125-
/* LDX: dst = *(size *)(src + off) */
1157+
/* LDX: dst = (u64)*(unsigned size *)(src + off) */
11261158
case BPF_LDX | BPF_MEM | BPF_W:
11271159
case BPF_LDX | BPF_MEM | BPF_H:
11281160
case BPF_LDX | BPF_MEM | BPF_B:
@@ -1131,36 +1163,63 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
11311163
case BPF_LDX | BPF_PROBE_MEM | BPF_W:
11321164
case BPF_LDX | BPF_PROBE_MEM | BPF_H:
11331165
case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1166+
/* LDXS: dst_reg = (s64)*(signed size *)(src_reg + off) */
1167+
case BPF_LDX | BPF_MEMSX | BPF_B:
1168+
case BPF_LDX | BPF_MEMSX | BPF_H:
1169+
case BPF_LDX | BPF_MEMSX | BPF_W:
1170+
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
1171+
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
1172+
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
11341173
if (ctx->fpb_offset > 0 && src == fp) {
11351174
src_adj = fpb;
11361175
off_adj = off + ctx->fpb_offset;
11371176
} else {
11381177
src_adj = src;
11391178
off_adj = off;
11401179
}
1180+
sign_extend = (BPF_MODE(insn->code) == BPF_MEMSX ||
1181+
BPF_MODE(insn->code) == BPF_PROBE_MEMSX);
11411182
switch (BPF_SIZE(code)) {
11421183
case BPF_W:
11431184
if (is_lsi_offset(off_adj, 2)) {
1144-
emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
1185+
if (sign_extend)
1186+
emit(A64_LDRSWI(dst, src_adj, off_adj), ctx);
1187+
else
1188+
emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
11451189
} else {
11461190
emit_a64_mov_i(1, tmp, off, ctx);
1147-
emit(A64_LDR32(dst, src, tmp), ctx);
1191+
if (sign_extend)
1192+
emit(A64_LDRSW(dst, src, tmp), ctx);
1193+
else
1194+
emit(A64_LDR32(dst, src, tmp), ctx);
11481195
}
11491196
break;
11501197
case BPF_H:
11511198
if (is_lsi_offset(off_adj, 1)) {
1152-
emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
1199+
if (sign_extend)
1200+
emit(A64_LDRSHI(dst, src_adj, off_adj), ctx);
1201+
else
1202+
emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
11531203
} else {
11541204
emit_a64_mov_i(1, tmp, off, ctx);
1155-
emit(A64_LDRH(dst, src, tmp), ctx);
1205+
if (sign_extend)
1206+
emit(A64_LDRSH(dst, src, tmp), ctx);
1207+
else
1208+
emit(A64_LDRH(dst, src, tmp), ctx);
11561209
}
11571210
break;
11581211
case BPF_B:
11591212
if (is_lsi_offset(off_adj, 0)) {
1160-
emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
1213+
if (sign_extend)
1214+
emit(A64_LDRSBI(dst, src_adj, off_adj), ctx);
1215+
else
1216+
emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
11611217
} else {
11621218
emit_a64_mov_i(1, tmp, off, ctx);
1163-
emit(A64_LDRB(dst, src, tmp), ctx);
1219+
if (sign_extend)
1220+
emit(A64_LDRSB(dst, src, tmp), ctx);
1221+
else
1222+
emit(A64_LDRB(dst, src, tmp), ctx);
11641223
}
11651224
break;
11661225
case BPF_DW:

arch/riscv/net/bpf_jit.h

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -431,11 +431,21 @@ static inline u32 rv_mulhu(u8 rd, u8 rs1, u8 rs2)
431431
return rv_r_insn(1, rs2, rs1, 3, rd, 0x33);
432432
}
433433

434+
static inline u32 rv_div(u8 rd, u8 rs1, u8 rs2)
435+
{
436+
return rv_r_insn(1, rs2, rs1, 4, rd, 0x33);
437+
}
438+
434439
static inline u32 rv_divu(u8 rd, u8 rs1, u8 rs2)
435440
{
436441
return rv_r_insn(1, rs2, rs1, 5, rd, 0x33);
437442
}
438443

444+
static inline u32 rv_rem(u8 rd, u8 rs1, u8 rs2)
445+
{
446+
return rv_r_insn(1, rs2, rs1, 6, rd, 0x33);
447+
}
448+
439449
static inline u32 rv_remu(u8 rd, u8 rs1, u8 rs2)
440450
{
441451
return rv_r_insn(1, rs2, rs1, 7, rd, 0x33);
@@ -501,6 +511,16 @@ static inline u32 rv_ble(u8 rs1, u8 rs2, u16 imm12_1)
501511
return rv_bge(rs2, rs1, imm12_1);
502512
}
503513

514+
static inline u32 rv_lb(u8 rd, u16 imm11_0, u8 rs1)
515+
{
516+
return rv_i_insn(imm11_0, rs1, 0, rd, 0x03);
517+
}
518+
519+
static inline u32 rv_lh(u8 rd, u16 imm11_0, u8 rs1)
520+
{
521+
return rv_i_insn(imm11_0, rs1, 1, rd, 0x03);
522+
}
523+
504524
static inline u32 rv_lw(u8 rd, u16 imm11_0, u8 rs1)
505525
{
506526
return rv_i_insn(imm11_0, rs1, 2, rd, 0x03);
@@ -766,11 +786,21 @@ static inline u32 rv_mulw(u8 rd, u8 rs1, u8 rs2)
766786
return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b);
767787
}
768788

789+
static inline u32 rv_divw(u8 rd, u8 rs1, u8 rs2)
790+
{
791+
return rv_r_insn(1, rs2, rs1, 4, rd, 0x3b);
792+
}
793+
769794
static inline u32 rv_divuw(u8 rd, u8 rs1, u8 rs2)
770795
{
771796
return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b);
772797
}
773798

799+
static inline u32 rv_remw(u8 rd, u8 rs1, u8 rs2)
800+
{
801+
return rv_r_insn(1, rs2, rs1, 6, rd, 0x3b);
802+
}
803+
774804
static inline u32 rv_remuw(u8 rd, u8 rs1, u8 rs2)
775805
{
776806
return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b);

0 commit comments

Comments
 (0)