@@ -288,116 +288,6 @@ static inline void
288288irq_pm_remove_action (struct irq_desc * desc , struct irqaction * action ) { }
289289#endif
290290
291- #ifdef CONFIG_IRQ_TIMINGS
292-
293- #define IRQ_TIMINGS_SHIFT 5
294- #define IRQ_TIMINGS_SIZE (1 << IRQ_TIMINGS_SHIFT)
295- #define IRQ_TIMINGS_MASK (IRQ_TIMINGS_SIZE - 1)
296-
297- /**
298- * struct irq_timings - irq timings storing structure
299- * @values: a circular buffer of u64 encoded <timestamp,irq> values
300- * @count: the number of elements in the array
301- */
302- struct irq_timings {
303- u64 values [IRQ_TIMINGS_SIZE ];
304- int count ;
305- };
306-
307- DECLARE_PER_CPU (struct irq_timings , irq_timings );
308-
309- extern void irq_timings_free (int irq );
310- extern int irq_timings_alloc (int irq );
311-
312- static inline void irq_remove_timings (struct irq_desc * desc )
313- {
314- desc -> istate &= ~IRQS_TIMINGS ;
315-
316- irq_timings_free (irq_desc_get_irq (desc ));
317- }
318-
319- static inline void irq_setup_timings (struct irq_desc * desc , struct irqaction * act )
320- {
321- int irq = irq_desc_get_irq (desc );
322- int ret ;
323-
324- /*
325- * We don't need the measurement because the idle code already
326- * knows the next expiry event.
327- */
328- if (act -> flags & __IRQF_TIMER )
329- return ;
330-
331- /*
332- * In case the timing allocation fails, we just want to warn,
333- * not fail, so letting the system boot anyway.
334- */
335- ret = irq_timings_alloc (irq );
336- if (ret ) {
337- pr_warn ("Failed to allocate irq timing stats for irq%d (%d)" ,
338- irq , ret );
339- return ;
340- }
341-
342- desc -> istate |= IRQS_TIMINGS ;
343- }
344-
/* Global switch for interrupt timing measurement (static key, off by default). */
DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);

extern void irq_timings_enable(void);
extern void irq_timings_disable(void);
349-
350- /*
351- * The interrupt number and the timestamp are encoded into a single
352- * u64 variable to optimize the size.
353- * 48 bit time stamp and 16 bit IRQ number is way sufficient.
354- * Who cares an IRQ after 78 hours of idle time?
355- */
356- static inline u64 irq_timing_encode (u64 timestamp , int irq )
357- {
358- return (timestamp << 16 ) | irq ;
359- }
360-
361- static inline int irq_timing_decode (u64 value , u64 * timestamp )
362- {
363- * timestamp = value >> 16 ;
364- return value & U16_MAX ;
365- }
366-
367- static __always_inline void irq_timings_push (u64 ts , int irq )
368- {
369- struct irq_timings * timings = this_cpu_ptr (& irq_timings );
370-
371- timings -> values [timings -> count & IRQ_TIMINGS_MASK ] =
372- irq_timing_encode (ts , irq );
373-
374- timings -> count ++ ;
375- }
376-
377- /*
378- * The function record_irq_time is only called in one place in the
379- * interrupts handler. We want this function always inline so the code
380- * inside is embedded in the function and the static key branching
381- * code can act at the higher level. Without the explicit
382- * __always_inline we can end up with a function call and a small
383- * overhead in the hotpath for nothing.
384- */
385- static __always_inline void record_irq_time (struct irq_desc * desc )
386- {
387- if (!static_branch_likely (& irq_timing_enabled ))
388- return ;
389-
390- if (desc -> istate & IRQS_TIMINGS )
391- irq_timings_push (local_clock (), irq_desc_get_irq (desc ));
392- }
393- #else
394- static inline void irq_remove_timings (struct irq_desc * desc ) {}
395- static inline void irq_setup_timings (struct irq_desc * desc ,
396- struct irqaction * act ) {};
397- static inline void record_irq_time (struct irq_desc * desc ) {}
398- #endif /* CONFIG_IRQ_TIMINGS */
399-
400-
401291#ifdef CONFIG_GENERIC_IRQ_CHIP
402292void irq_init_generic_chip (struct irq_chip_generic * gc , const char * name ,
403293 int num_ct , unsigned int irq_base ,
0 commit comments