File tree Expand file tree Collapse file tree
Expand file tree Collapse file tree Original file line number Diff line number Diff line change 1717#endif
1818
1919#include <asm/cmpxchg.h>
20- #include <asm/barrier.h>
2120
2221#define __atomic_acquire_fence() \
2322 __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
Original file line number Diff line number Diff line change 1111#define _ASM_RISCV_BARRIER_H
1212
1313#ifndef __ASSEMBLY__
14+ #include <asm/fence.h>
1415
1516#define nop() __asm__ __volatile__ ("nop")
1617#define __nops(n) ".rept " #n "\nnop\n.endr\n"
1718#define nops(n) __asm__ __volatile__ (__nops(n))
1819
19- #define RISCV_FENCE(p, s) \
20- __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
2120
2221/* These barriers need to enforce ordering on both devices or memory. */
2322#define __mb() RISCV_FENCE(iorw, iorw)
Original file line number Diff line number Diff line change 88
99#include <linux/bug.h>
1010
11- #include <asm/barrier.h>
1211#include <asm/fence.h>
1312
1413#define __xchg_relaxed(ptr, new, size) \
Original file line number Diff line number Diff line change 11#ifndef _ASM_RISCV_FENCE_H
22#define _ASM_RISCV_FENCE_H
33
4+ #define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n"
5+ #define RISCV_FENCE(p, s) \
6+ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
7+
48#ifdef CONFIG_SMP
5- #define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
6- #define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
7- #define RISCV_FULL_BARRIER "\tfence rw, rw\n"
9+ #define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw)
10+ #define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w)
11+ #define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw)
812#else
913#define RISCV_ACQUIRE_BARRIER
1014#define RISCV_RELEASE_BARRIER
Original file line number Diff line number Diff line change 4747 * sufficient to ensure this works sanely on controllers that support I/O
4848 * writes.
4949 */
50- #define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
51- #define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
52- #define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
53- #define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
50+ #define __io_pbr() RISCV_FENCE(io, i)
51+ #define __io_par(v) RISCV_FENCE(i, ior)
52+ #define __io_pbw() RISCV_FENCE(iow, o)
53+ #define __io_paw() RISCV_FENCE(o, io)
5454
5555/*
5656 * Accesses from a single hart to a single I/O address must be ordered. This
Original file line number Diff line number Diff line change 1212#define _ASM_RISCV_MMIO_H
1313
1414#include <linux/types.h>
15+ #include <asm/fence.h>
1516#include <asm/mmiowb.h>
1617
1718/* Generic IO read/write. These perform native-endian accesses. */
@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
131132 * doesn't define any ordering between the memory space and the I/O space.
132133 */
133134#define __io_br() do {} while (0)
134- #define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
135- #define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
135+ #define __io_ar(v) RISCV_FENCE(i, ir)
136+ #define __io_bw() RISCV_FENCE(w, o)
136137#define __io_aw() mmiowb_set_pending()
137138
138139#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
Original file line number Diff line number Diff line change 77 * "o,w" is sufficient to ensure that all writes to the device have completed
88 * before the write to the spinlock is allowed to commit.
99 */
10- #define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
10+ #define mmiowb() RISCV_FENCE(o, w)
1111
1212#include <linux/smp.h>
1313#include <asm-generic/mmiowb.h>
You can’t perform that action at this time.
0 commit comments