@@ -274,18 +274,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
274274 * to generate better code.
275275 */
276276#ifdef CONFIG_LOCKDEP
277- #define __INIT_WORK(_work, _func, _onstack) \
277+ #define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
278278 do { \
279- static struct lock_class_key __key; \
280- \
281279 __init_work((_work), _onstack); \
282280 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
283- lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
281+ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
284282 INIT_LIST_HEAD(&(_work)->entry); \
285283 (_work)->func = (_func); \
286284 } while (0)
287285#else
288- #define __INIT_WORK(_work, _func, _onstack) \
286+ #define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
289287 do { \
290288 __init_work((_work), _onstack); \
291289 (_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
@@ -294,12 +292,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
294292 } while (0)
295293#endif
296294
295+ #define __INIT_WORK(_work, _func, _onstack) \
296+ do { \
297+ static __maybe_unused struct lock_class_key __key; \
298+ \
299+ __INIT_WORK_KEY(_work, _func, _onstack, &__key); \
300+ } while (0)
301+
297302#define INIT_WORK(_work, _func) \
298303 __INIT_WORK((_work), (_func), 0)
299304
300305#define INIT_WORK_ONSTACK(_work, _func) \
301306 __INIT_WORK((_work), (_func), 1)
302307
308+ #define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \
309+ __INIT_WORK_KEY((_work), (_func), 1, _key)
310+
303311#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
304312 do { \
305313 INIT_WORK(&(_work)->work, (_func)); \
@@ -693,8 +701,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
693701 return fn(arg);
694702}
695703#else
696- long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
697- long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
704+ long work_on_cpu_key(int cpu, long (*fn)(void *),
705+ void *arg, struct lock_class_key *key);
706+ /*
707+ * A new key is defined for each caller to make sure the work
708+ * associated with the function doesn't share its locking class.
709+ */
710+ #define work_on_cpu(_cpu, _fn, _arg) \
711+ ({ \
712+ static struct lock_class_key __key; \
713+ \
714+ work_on_cpu_key(_cpu, _fn, _arg, &__key); \
715+ })
716+
717+ long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
718+ void *arg, struct lock_class_key *key);
719+
720+ /*
721+ * A new key is defined for each caller to make sure the work
722+ * associated with the function doesn't share its locking class.
723+ */
724+ #define work_on_cpu_safe(_cpu, _fn, _arg) \
725+ ({ \
726+ static struct lock_class_key __key; \
727+ \
728+ work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
729+ })
698730#endif /* CONFIG_SMP */
699731
700732#ifdef CONFIG_FREEZER
0 commit comments