@@ -682,7 +682,6 @@ void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
 
 static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
 {
-	const struct k_clock *kc;
 	struct k_itimer *timr;
 	unsigned long flags;
 	int ret = 0;
@@ -692,11 +691,7 @@ static int do_timer_gettime(timer_t timer_id, struct itimerspec64 *setting)
 		return -EINVAL;
 
 	memset(setting, 0, sizeof(*setting));
-	kc = timr->kclock;
-	if (WARN_ON_ONCE(!kc || !kc->timer_get))
-		ret = -EINVAL;
-	else
-		kc->timer_get(timr, setting);
+	timr->kclock->timer_get(timr, setting);
 
 	unlock_timer(timr, flags);
 	return ret;
@@ -824,7 +819,6 @@ static void common_timer_wait_running(struct k_itimer *timer)
 static struct k_itimer *timer_wait_running(struct k_itimer *timer,
 					   unsigned long *flags)
 {
-	const struct k_clock *kc = READ_ONCE(timer->kclock);
 	timer_t timer_id = READ_ONCE(timer->it_id);
 
 	/* Prevent kfree(timer) after dropping the lock */
@@ -835,8 +829,7 @@ static struct k_itimer *timer_wait_running(struct k_itimer *timer,
 	 * kc->timer_wait_running() might drop RCU lock. So @timer
 	 * cannot be touched anymore after the function returns!
 	 */
-	if (!WARN_ON_ONCE(!kc->timer_wait_running))
-		kc->timer_wait_running(timer);
+	timer->kclock->timer_wait_running(timer);
 
 	rcu_read_unlock();
 	/* Relock the timer. It might be not longer hashed. */
@@ -899,7 +892,6 @@ static int do_timer_settime(timer_t timer_id, int tmr_flags,
 			    struct itimerspec64 *new_spec64,
 			    struct itimerspec64 *old_spec64)
 {
-	const struct k_clock *kc;
 	struct k_itimer *timr;
 	unsigned long flags;
 	int error;
@@ -922,11 +914,7 @@ static int do_timer_settime(timer_t timer_id, int tmr_flags,
 	/* Prevent signal delivery and rearming. */
 	timr->it_signal_seq++;
 
-	kc = timr->kclock;
-	if (WARN_ON_ONCE(!kc || !kc->timer_set))
-		error = -EINVAL;
-	else
-		error = kc->timer_set(timr, tmr_flags, new_spec64, old_spec64);
+	error = timr->kclock->timer_set(timr, tmr_flags, new_spec64, old_spec64);
 
 	if (error == TIMER_RETRY) {
 		// We already got the old time...
@@ -1008,18 +996,6 @@ static inline void posix_timer_cleanup_ignored(struct k_itimer *tmr)
 	}
 }
 
-static inline int timer_delete_hook(struct k_itimer *timer)
-{
-	const struct k_clock *kc = timer->kclock;
-
-	/* Prevent signal delivery and rearming. */
-	timer->it_signal_seq++;
-
-	if (WARN_ON_ONCE(!kc || !kc->timer_del))
-		return -EINVAL;
-	return kc->timer_del(timer);
-}
-
 /* Delete a POSIX.1b interval timer. */
 SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 {
@@ -1032,7 +1008,10 @@ SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
 	if (!timer)
 		return -EINVAL;
 
-	if (unlikely(timer_delete_hook(timer) == TIMER_RETRY)) {
+	/* Prevent signal delivery and rearming. */
+	timer->it_signal_seq++;
+
+	if (unlikely(timer->kclock->timer_del(timer) == TIMER_RETRY)) {
 		/* Unlocks and relocks the timer if it still exists */
 		timer = timer_wait_running(timer, &flags);
 		goto retry_delete;
@@ -1078,7 +1057,7 @@ static void itimer_delete(struct k_itimer *timer)
 	 * mechanism. Worse, that timer mechanism might run the expiry
 	 * function concurrently.
 	 */
-	if (timer_delete_hook(timer) == TIMER_RETRY) {
+	if (timer->kclock->timer_del(timer) == TIMER_RETRY) {
 		/*
 		 * Timer is expired concurrently, prevent livelocks
 		 * and pointless spinning on RT.
0 commit comments