@@ -21,7 +21,7 @@ static inline void gcsstr(u64 *addr, u64 val)
2121 register u64 * _addr __asm__ ("x0" ) = addr ;
2222 register long _val __asm__ ("x1" ) = val ;
2323
24- /* GCSSTTR x1, x0 */
24+ /* GCSSTTR x1, [x0] */
2525 asm volatile (
2626 ".inst 0xd91f1c01\n"
2727 :
@@ -81,6 +81,82 @@ static inline int gcs_check_locked(struct task_struct *task,
8181 return 0 ;
8282}
8383
84+ static inline int gcssttr (unsigned long __user * addr , unsigned long val )
85+ {
86+ register unsigned long __user * _addr __asm__ ("x0" ) = addr ;
87+ register unsigned long _val __asm__ ("x1" ) = val ;
88+ int err = 0 ;
89+
90+ /* GCSSTTR x1, [x0] */
91+ asm volatile (
92+ "1: .inst 0xd91f1c01\n"
93+ "2: \n"
94+ _ASM_EXTABLE_UACCESS_ERR (1b , 2b , %w0 )
95+ : "+ r " (err)
96+ : " rZ " (_val), " r " (_addr)
97+ : " memory ");
98+
99+ return err ;
100+ }
101+
/*
 * Store @val to the userspace GCS location @addr, reporting any
 * failure through *err (which is left untouched on success).
 *
 * access_ok() is checked first so we never hand a kernel address to
 * GCSSTTR; uaccess is then enabled around the store for
 * CONFIG_ARM64_SW_TTBR0_PAN kernels.
 */
static inline void put_user_gcs(unsigned long val, unsigned long __user *addr,
				int *err)
{
	int ret;

	if (!access_ok((char __user *)addr, sizeof(u64))) {
		*err = -EFAULT;
		return;
	}

	uaccess_ttbr0_enable();
	ret = gcssttr(addr, val);
	if (ret != 0)
		*err = ret;
	uaccess_ttbr0_disable();
}
118+
/*
 * Push @val onto the current task's userspace GCS, reporting any
 * failure through *err.
 *
 * GCSPR_EL0 is only decremented once the store has succeeded, so on
 * failure the stack pointer is left unmodified.
 */
static inline void push_user_gcs(unsigned long val, int *err)
{
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);

	/* The GCS grows downwards: make room for one u64 entry. */
	gcspr -= sizeof(u64);
	put_user_gcs(val, (unsigned long __user *)gcspr, err);
	if (!*err)
		write_sysreg_s(gcspr, SYS_GCSPR_EL0);
}
128+
/*
 * Unlike put/push_user_gcs() above, get/pop_user_gcs() do not
 * validate that GCS permission is set on the page being read.  This
 * differs from how the hardware works when it consumes data stored at
 * GCSPR.  Callers should ensure this is acceptable.
 */
/*
 * Load one u64 from the userspace GCS location @addr.  On failure
 * *err is set non-zero; on success it is left untouched.  Returns the
 * loaded value, or 0 if the copy faulted.
 *
 * NOTE(review): on failure *err receives copy_from_user()'s positive
 * bytes-not-copied count, not -EFAULT as in put_user_gcs() — confirm
 * callers only ever test *err for non-zero.
 */
static inline u64 get_user_gcs(unsigned long __user *addr, int *err)
{
	unsigned long ret;
	u64 load = 0;

	/* Ensure previous GCS operations are visible before we read the page */
	gcsb_dsync();
	ret = copy_from_user(&load, addr, sizeof(load));
	if (ret != 0)
		*err = ret;
	return load;
}
147+
/*
 * Pop one entry from the current task's userspace GCS, reporting any
 * failure through *err.
 *
 * GCSPR_EL0 is only advanced (the GCS grows downwards, so popping
 * increments it) once the read has succeeded; on failure the stack
 * pointer is left unmodified.  Returns the popped value (0 on fault).
 */
static inline u64 pop_user_gcs(int *err)
{
	u64 gcspr = read_sysreg_s(SYS_GCSPR_EL0);
	u64 read_val;

	read_val = get_user_gcs((__force unsigned long __user *)gcspr, err);
	if (!*err)
		write_sysreg_s(gcspr + sizeof(u64), SYS_GCSPR_EL0);

	return read_val;
}
159+
84160#else
85161
86162static inline bool task_gcs_el0_enabled (struct task_struct * task )
@@ -91,6 +167,10 @@ static inline bool task_gcs_el0_enabled(struct task_struct *task)
91167static inline void gcs_set_el0_mode (struct task_struct * task ) { }
92168static inline void gcs_free (struct task_struct * task ) { }
93169static inline void gcs_preserve_current_state (void ) { }
170+ static inline void put_user_gcs (unsigned long val , unsigned long __user * addr ,
171+ int * err ) { }
172+ static inline void push_user_gcs (unsigned long val , int * err ) { }
173+
94174static inline unsigned long gcs_alloc_thread_stack (struct task_struct * tsk ,
95175 const struct kernel_clone_args * args )
96176{
@@ -101,6 +181,15 @@ static inline int gcs_check_locked(struct task_struct *task,
101181{
102182 return 0 ;
103183}
/*
 * !CONFIG_ARM64_GCS stub: there is no GCS to read, so always fail
 * with -EFAULT and return 0.
 */
static inline u64 get_user_gcs(unsigned long __user *addr, int *err)
{
	*err = -EFAULT;
	return 0;
}
189+ static inline u64 pop_user_gcs (int * err )
190+ {
191+ return 0 ;
192+ }
104193
105194#endif
106195
0 commit comments