|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
| 2 | + |
| 3 | +#include <linux/jump_label.h> |
| 4 | +#include <linux/kernel.h> |
| 5 | +#include <linux/memory.h> |
| 6 | +#include <linux/mutex.h> |
| 7 | +#include <linux/uaccess.h> |
| 8 | +#include <asm/cacheflush.h> |
| 9 | + |
| 10 | +#define NOP32_HI 0xc400 |
| 11 | +#define NOP32_LO 0x4820 |
| 12 | +#define BSR_LINK 0xe000 |
| 13 | + |
/*
 * Patch a single jump-label site in kernel text.
 *
 * For JUMP_LABEL_JMP the 32-bit instruction at the entry's code address
 * is rewritten as a branch (BSR_LINK opcode) to the entry's target; for
 * any other type it is rewritten as a 32-bit nop (NOP32_HI/NOP32_LO).
 * The new instruction is written with copy_to_kernel_nofault() and the
 * icache is flushed so the change becomes visible to execution.
 */
void arch_jump_label_transform(struct jump_entry *entry,
			       enum jump_label_type type)
{
	unsigned long addr = jump_entry_code(entry);
	u16 insn[2];	/* one 32-bit instruction, built as two halfwords */
	int ret = 0;

	if (type == JUMP_LABEL_JMP) {
		/* Byte displacement from the patch site to the branch target. */
		long offset = jump_entry_target(entry) - jump_entry_code(entry);

		/*
		 * The target must be halfword aligned and within
		 * [-64MiB, +64MiB) (67108864 == 2^26 bytes) -- the range
		 * the branch encoding below can express. Out-of-range
		 * targets are rejected without patching.
		 */
		if (WARN_ON(offset & 1 || offset < -67108864 || offset >= 67108864))
			return;

		/* The instruction encodes the offset in halfword units. */
		offset = offset >> 1;

		/*
		 * High halfword: opcode plus the top 10 bits of the
		 * offset; low halfword: the remaining 16 offset bits.
		 */
		insn[0] = BSR_LINK |
			((uint16_t)((unsigned long) offset >> 16) & 0x3ff);
		insn[1] = (uint16_t)((unsigned long) offset & 0xffff);
	} else {
		/* Branch disabled: replace it with a 32-bit nop. */
		insn[0] = NOP32_HI;
		insn[1] = NOP32_LO;
	}

	/* Patch the text first, then make it visible to instruction fetch. */
	ret = copy_to_kernel_nofault((void *)addr, insn, 4);
	WARN_ON(ret);

	flush_icache_range(addr, addr + 4);
}
| 42 | + |
| 43 | +void arch_jump_label_transform_static(struct jump_entry *entry, |
| 44 | + enum jump_label_type type) |
| 45 | +{ |
| 46 | + /* |
| 47 | + * We use the same instructions in the arch_static_branch and |
| 48 | + * arch_static_branch_jump inline functions, so there's no |
| 49 | + * need to patch them up here. |
| 50 | + * The core will call arch_jump_label_transform when those |
| 51 | + * instructions need to be replaced. |
| 52 | + */ |
| 53 | + arch_jump_label_transform(entry, type); |
| 54 | +} |
0 commit comments