Skip to content

Commit 20a759d

Browse files
puranjaymohan authored and Alexei Starovoitov committed
riscv, bpf: make some atomic operations fully ordered
The BPF atomic operations with the BPF_FETCH modifier along with BPF_XCHG and BPF_CMPXCHG are fully ordered but the RISC-V JIT implements all atomic operations except BPF_CMPXCHG with relaxed ordering. Section 8.1 of the "The RISC-V Instruction Set Manual Volume I: Unprivileged ISA" [1], titled, "Specifying Ordering of Atomic Instructions" says: | To provide more efficient support for release consistency [5], each | atomic instruction has two bits, aq and rl, used to specify additional | memory ordering constraints as viewed by other RISC-V harts. and | If only the aq bit is set, the atomic memory operation is treated as | an acquire access. | If only the rl bit is set, the atomic memory operation is treated as a | release access. | | If both the aq and rl bits are set, the atomic memory operation is | sequentially consistent. Fix this by setting both aq and rl bits as 1 for operations with BPF_FETCH and BPF_XCHG. [1] https://riscv.org/wp-content/uploads/2017/05/riscv-spec-v2.2.pdf Fixes: dd642cc ("riscv, bpf: Implement more atomic operations for RV64") Signed-off-by: Puranjay Mohan <puranjay@kernel.org> Reviewed-by: Pu Lehui <pulehui@huawei.com> Link: https://lore.kernel.org/r/20240505201633.123115-1-puranjay@kernel.org Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent 80c5a07 commit 20a759d

1 file changed

Lines changed: 10 additions & 10 deletions

File tree

arch/riscv/net/bpf_jit_comp64.c

Lines changed: 10 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -504,33 +504,33 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
504504
break;
505505
/* src_reg = atomic_fetch_<op>(dst_reg + off16, src_reg) */
506506
case BPF_ADD | BPF_FETCH:
507-
emit(is64 ? rv_amoadd_d(rs, rs, rd, 0, 0) :
508-
rv_amoadd_w(rs, rs, rd, 0, 0), ctx);
507+
emit(is64 ? rv_amoadd_d(rs, rs, rd, 1, 1) :
508+
rv_amoadd_w(rs, rs, rd, 1, 1), ctx);
509509
if (!is64)
510510
emit_zextw(rs, rs, ctx);
511511
break;
512512
case BPF_AND | BPF_FETCH:
513-
emit(is64 ? rv_amoand_d(rs, rs, rd, 0, 0) :
514-
rv_amoand_w(rs, rs, rd, 0, 0), ctx);
513+
emit(is64 ? rv_amoand_d(rs, rs, rd, 1, 1) :
514+
rv_amoand_w(rs, rs, rd, 1, 1), ctx);
515515
if (!is64)
516516
emit_zextw(rs, rs, ctx);
517517
break;
518518
case BPF_OR | BPF_FETCH:
519-
emit(is64 ? rv_amoor_d(rs, rs, rd, 0, 0) :
520-
rv_amoor_w(rs, rs, rd, 0, 0), ctx);
519+
emit(is64 ? rv_amoor_d(rs, rs, rd, 1, 1) :
520+
rv_amoor_w(rs, rs, rd, 1, 1), ctx);
521521
if (!is64)
522522
emit_zextw(rs, rs, ctx);
523523
break;
524524
case BPF_XOR | BPF_FETCH:
525-
emit(is64 ? rv_amoxor_d(rs, rs, rd, 0, 0) :
526-
rv_amoxor_w(rs, rs, rd, 0, 0), ctx);
525+
emit(is64 ? rv_amoxor_d(rs, rs, rd, 1, 1) :
526+
rv_amoxor_w(rs, rs, rd, 1, 1), ctx);
527527
if (!is64)
528528
emit_zextw(rs, rs, ctx);
529529
break;
530530
/* src_reg = atomic_xchg(dst_reg + off16, src_reg); */
531531
case BPF_XCHG:
532-
emit(is64 ? rv_amoswap_d(rs, rs, rd, 0, 0) :
533-
rv_amoswap_w(rs, rs, rd, 0, 0), ctx);
532+
emit(is64 ? rv_amoswap_d(rs, rs, rd, 1, 1) :
533+
rv_amoswap_w(rs, rs, rd, 1, 1), ctx);
534534
if (!is64)
535535
emit_zextw(rs, rs, ctx);
536536
break;

0 commit comments

Comments (0)