|
49 | 49 | #include <asm/cacheflush.h> |
50 | 50 | #include <asm/exec.h> |
51 | 51 | #include <asm/fpsimd.h> |
| 52 | +#include <asm/gcs.h> |
52 | 53 | #include <asm/mmu_context.h> |
53 | 54 | #include <asm/mte.h> |
54 | 55 | #include <asm/processor.h> |
@@ -280,13 +281,33 @@ static void flush_poe(void) |
280 | 281 | write_sysreg_s(POR_EL0_INIT, SYS_POR_EL0); |
281 | 282 | } |
282 | 283 |
|
| 284 | +#ifdef CONFIG_ARM64_GCS |
| 285 | + |
| | +/* |
| | + * Reset Guarded Control Stack state for the current task on exec(): |
| | + * release its userspace shadow stack, clear the recorded EL0 GCS mode |
| | + * and return the GCS control/pointer system registers to their reset |
| | + * values. |
| | + */ |
| 286 | +static void flush_gcs(void) |
| 287 | +{ |
| | + /* No-op on hardware without GCS support. */ |
| 288 | + if (!system_supports_gcs()) |
| 289 | + return; |
| 290 | + |
| | + /* Drop the task's shadow-stack allocation, if it has one. */ |
| 291 | + gcs_free(current); |
| 292 | + current->thread.gcs_el0_mode = 0; |
| | + /* |
| | + * NOTE(review): GCSCRE0_EL1_nTR presumably leaves only the |
| | + * non-trapping-reads bit set, i.e. GCS otherwise disabled for |
| | + * EL0 -- confirm against asm/gcs.h and the Arm ARM. |
| | + */ |
| 293 | + write_sysreg_s(GCSCRE0_EL1_nTR, SYS_GCSCRE0_EL1); |
| 294 | + write_sysreg_s(0, SYS_GCSPR_EL0); |
| 295 | +} |
| 296 | + |
| 297 | +#else |
| 298 | + |
| | +/* Stub so flush_thread() needs no #ifdef at the call site. */ |
| 299 | +static void flush_gcs(void) { } |
| 300 | + |
| 301 | +#endif |
| 302 | + |
| | /* |
| | * Reset all per-thread architectural state for the current task, |
| | * called on exec(): FP/SIMD, TLS, hardware breakpoints, tagged |
| | * address (MTE) state, permission overlays (POE) and now GCS. |
| | */ |
283 | 303 | void flush_thread(void) |
284 | 304 | { |
285 | 305 | fpsimd_flush_thread(); |
286 | 306 | tls_thread_flush(); |
287 | 307 | flush_ptrace_hw_breakpoint(current); |
288 | 308 | flush_tagged_addr_state(); |
289 | 309 | flush_poe(); |
| 310 | + flush_gcs(); |
290 | 311 | } |
291 | 312 |
|
292 | 313 | void arch_release_task_struct(struct task_struct *tsk) |
@@ -484,6 +505,46 @@ static void entry_task_switch(struct task_struct *next) |
484 | 505 | __this_cpu_write(__entry_task, next); |
485 | 506 | } |
486 | 507 |
|
| 508 | +#ifdef CONFIG_ARM64_GCS |
| 509 | + |
| | +/* |
| | + * Snapshot the current task's userspace GCS stack pointer from the |
| | + * hardware register into thread state, so it can be restored on the |
| | + * next switch to this task. Non-static: also used outside this file |
| | + * (e.g. by ptrace/signal code -- see asm/gcs.h). |
| | + */ |
| 510 | +void gcs_preserve_current_state(void) |
| 511 | +{ |
| 512 | + current->thread.gcspr_el0 = read_sysreg_s(SYS_GCSPR_EL0); |
| 513 | +} |
| 514 | + |
| | +/* |
| | + * Context-switch the EL0 GCS state: save the outgoing task's GCSPR_EL0, |
| | + * install the incoming task's, reprogram the EL0 GCS mode if it differs, |
| | + * and issue a GCS barrier so shadow-stack writes are visible across PEs. |
| | + */ |
| 515 | +static void gcs_thread_switch(struct task_struct *next) |
| 516 | +{ |
| 517 | + if (!system_supports_gcs()) |
| 518 | + return; |
| 519 | + |
| 520 | + /* GCSPR_EL0 is always readable */ |
| 521 | + gcs_preserve_current_state(); |
| 522 | + write_sysreg_s(next->thread.gcspr_el0, SYS_GCSPR_EL0); |
| 523 | + |
| | + /* Only touch GCSCRE0_EL1 when the mode actually changes. */ |
| 524 | + if (current->thread.gcs_el0_mode != next->thread.gcs_el0_mode) |
| 525 | + gcs_set_el0_mode(next); |
| 526 | + |
| 527 | + /* |
| 528 | + * Ensure that GCS memory effects of the 'prev' thread are |
| 529 | + * ordered before other memory accesses with release semantics |
| 530 | + * (or preceded by a DMB) on the current PE. In addition, any |
| 531 | + * memory accesses with acquire semantics (or succeeded by a |
| 532 | + * DMB) are ordered before GCS memory effects of the 'next' |
| 533 | + * thread. This will ensure that the GCS memory effects are |
| 534 | + * visible to other PEs in case of migration. |
| 535 | + */ |
| 536 | + if (task_gcs_el0_enabled(current) || task_gcs_el0_enabled(next)) |
| 537 | + gcsb_dsync(); |
| 538 | +} |
| 539 | + |
| 540 | +#else |
| 541 | + |
| | +/* Stub so __switch_to() needs no #ifdef at the call site. */ |
| 542 | +static void gcs_thread_switch(struct task_struct *next) |
| 543 | +{ |
| 544 | +} |
| 545 | + |
| 546 | +#endif |
| 547 | + |
487 | 548 | /* |
488 | 549 | * Handle sysreg updates for ARM erratum 1418040 which affects the 32bit view of |
489 | 550 | * CNTVCT, various other errata which require trapping all CNTVCT{,_EL0} |
@@ -580,6 +641,7 @@ struct task_struct *__switch_to(struct task_struct *prev, |
580 | 641 | cntkctl_thread_switch(prev, next); |
581 | 642 | ptrauth_thread_switch_user(next); |
582 | 643 | permission_overlay_switch(next); |
| 644 | + gcs_thread_switch(next); |
583 | 645 |
|
584 | 646 | /* |
585 | 647 | * Complete any pending TLB or cache maintenance on this CPU in case |
|
0 commit comments