88 */
99
1010#include <linux/kernel.h>
11+ #include <linux/bitops.h>
12+ #include <linux/irq.h>
13+ #include <linux/irqdomain.h>
1114#include <linux/kvm_host.h>
15+ #include <linux/percpu.h>
16+ #include <linux/spinlock.h>
1217#include <asm/hwcap.h>
1318
/*
 * Per-HART guest external interrupt (HGEI) line bookkeeping.
 * Line 0 is never handed out (aia_hgei_init() clears BIT(0) from
 * free_bitmap); lines 1..kvm_riscv_aia_nr_hgei are allocatable.
 */
struct aia_hgei_control {
	raw_spinlock_t lock;		/* protects free_bitmap and owners[] */
	unsigned long free_bitmap;	/* set bit => HGEI line is free */
	struct kvm_vcpu *owners[BITS_PER_LONG];	/* VCPU owning each line, or NULL */
};
static DEFINE_PER_CPU(struct aia_hgei_control, aia_hgei);
static int hgei_parent_irq;	/* Linux irq mapped from INTC's IRQ_S_GEXT */

/* Number of usable HGEI lines per HART, probed in kvm_riscv_aia_init() */
unsigned int kvm_riscv_aia_nr_hgei;
DEFINE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
1529
30+ static int aia_find_hgei (struct kvm_vcpu * owner )
31+ {
32+ int i , hgei ;
33+ unsigned long flags ;
34+ struct aia_hgei_control * hgctrl = get_cpu_ptr (& aia_hgei );
35+
36+ raw_spin_lock_irqsave (& hgctrl -> lock , flags );
37+
38+ hgei = -1 ;
39+ for (i = 1 ; i <= kvm_riscv_aia_nr_hgei ; i ++ ) {
40+ if (hgctrl -> owners [i ] == owner ) {
41+ hgei = i ;
42+ break ;
43+ }
44+ }
45+
46+ raw_spin_unlock_irqrestore (& hgctrl -> lock , flags );
47+
48+ put_cpu_ptr (& aia_hgei );
49+ return hgei ;
50+ }
51+
1652static void aia_set_hvictl (bool ext_irq_pending )
1753{
1854 unsigned long hvictl ;
@@ -56,6 +92,7 @@ void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
5692
5793bool kvm_riscv_vcpu_aia_has_interrupts (struct kvm_vcpu * vcpu , u64 mask )
5894{
95+ int hgei ;
5996 unsigned long seip ;
6097
6198 if (!kvm_riscv_aia_available ())
@@ -74,6 +111,10 @@ bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask)
74111 if (!kvm_riscv_aia_initialized (vcpu -> kvm ) || !seip )
75112 return false;
76113
114+ hgei = aia_find_hgei (vcpu );
115+ if (hgei > 0 )
116+ return !!(csr_read (CSR_HGEIP ) & BIT (hgei ));
117+
77118 return false;
78119}
79120
@@ -348,6 +389,143 @@ int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
348389 return KVM_INSN_EXIT_TO_USER_SPACE ;
349390}
350391
392+ int kvm_riscv_aia_alloc_hgei (int cpu , struct kvm_vcpu * owner ,
393+ void __iomem * * hgei_va , phys_addr_t * hgei_pa )
394+ {
395+ int ret = - ENOENT ;
396+ unsigned long flags ;
397+ struct aia_hgei_control * hgctrl = per_cpu_ptr (& aia_hgei , cpu );
398+
399+ if (!kvm_riscv_aia_available () || !hgctrl )
400+ return - ENODEV ;
401+
402+ raw_spin_lock_irqsave (& hgctrl -> lock , flags );
403+
404+ if (hgctrl -> free_bitmap ) {
405+ ret = __ffs (hgctrl -> free_bitmap );
406+ hgctrl -> free_bitmap &= ~BIT (ret );
407+ hgctrl -> owners [ret ] = owner ;
408+ }
409+
410+ raw_spin_unlock_irqrestore (& hgctrl -> lock , flags );
411+
412+ /* TODO: To be updated later by AIA in-kernel irqchip support */
413+ if (hgei_va )
414+ * hgei_va = NULL ;
415+ if (hgei_pa )
416+ * hgei_pa = 0 ;
417+
418+ return ret ;
419+ }
420+
421+ void kvm_riscv_aia_free_hgei (int cpu , int hgei )
422+ {
423+ unsigned long flags ;
424+ struct aia_hgei_control * hgctrl = per_cpu_ptr (& aia_hgei , cpu );
425+
426+ if (!kvm_riscv_aia_available () || !hgctrl )
427+ return ;
428+
429+ raw_spin_lock_irqsave (& hgctrl -> lock , flags );
430+
431+ if (hgei > 0 && hgei <= kvm_riscv_aia_nr_hgei ) {
432+ if (!(hgctrl -> free_bitmap & BIT (hgei ))) {
433+ hgctrl -> free_bitmap |= BIT (hgei );
434+ hgctrl -> owners [hgei ] = NULL ;
435+ }
436+ }
437+
438+ raw_spin_unlock_irqrestore (& hgctrl -> lock , flags );
439+ }
440+
441+ void kvm_riscv_aia_wakeon_hgei (struct kvm_vcpu * owner , bool enable )
442+ {
443+ int hgei ;
444+
445+ if (!kvm_riscv_aia_available ())
446+ return ;
447+
448+ hgei = aia_find_hgei (owner );
449+ if (hgei > 0 ) {
450+ if (enable )
451+ csr_set (CSR_HGEIE , BIT (hgei ));
452+ else
453+ csr_clear (CSR_HGEIE , BIT (hgei ));
454+ }
455+ }
456+
457+ static irqreturn_t hgei_interrupt (int irq , void * dev_id )
458+ {
459+ int i ;
460+ unsigned long hgei_mask , flags ;
461+ struct aia_hgei_control * hgctrl = get_cpu_ptr (& aia_hgei );
462+
463+ hgei_mask = csr_read (CSR_HGEIP ) & csr_read (CSR_HGEIE );
464+ csr_clear (CSR_HGEIE , hgei_mask );
465+
466+ raw_spin_lock_irqsave (& hgctrl -> lock , flags );
467+
468+ for_each_set_bit (i , & hgei_mask , BITS_PER_LONG ) {
469+ if (hgctrl -> owners [i ])
470+ kvm_vcpu_kick (hgctrl -> owners [i ]);
471+ }
472+
473+ raw_spin_unlock_irqrestore (& hgctrl -> lock , flags );
474+
475+ put_cpu_ptr (& aia_hgei );
476+ return IRQ_HANDLED ;
477+ }
478+
479+ static int aia_hgei_init (void )
480+ {
481+ int cpu , rc ;
482+ struct irq_domain * domain ;
483+ struct aia_hgei_control * hgctrl ;
484+
485+ /* Initialize per-CPU guest external interrupt line management */
486+ for_each_possible_cpu (cpu ) {
487+ hgctrl = per_cpu_ptr (& aia_hgei , cpu );
488+ raw_spin_lock_init (& hgctrl -> lock );
489+ if (kvm_riscv_aia_nr_hgei ) {
490+ hgctrl -> free_bitmap =
491+ BIT (kvm_riscv_aia_nr_hgei + 1 ) - 1 ;
492+ hgctrl -> free_bitmap &= ~BIT (0 );
493+ } else
494+ hgctrl -> free_bitmap = 0 ;
495+ }
496+
497+ /* Find INTC irq domain */
498+ domain = irq_find_matching_fwnode (riscv_get_intc_hwnode (),
499+ DOMAIN_BUS_ANY );
500+ if (!domain ) {
501+ kvm_err ("unable to find INTC domain\n" );
502+ return - ENOENT ;
503+ }
504+
505+ /* Map per-CPU SGEI interrupt from INTC domain */
506+ hgei_parent_irq = irq_create_mapping (domain , IRQ_S_GEXT );
507+ if (!hgei_parent_irq ) {
508+ kvm_err ("unable to map SGEI IRQ\n" );
509+ return - ENOMEM ;
510+ }
511+
512+ /* Request per-CPU SGEI interrupt */
513+ rc = request_percpu_irq (hgei_parent_irq , hgei_interrupt ,
514+ "riscv-kvm" , & aia_hgei );
515+ if (rc ) {
516+ kvm_err ("failed to request SGEI IRQ\n" );
517+ return rc ;
518+ }
519+
520+ return 0 ;
521+ }
522+
/*
 * Undo aia_hgei_init(): release the per-CPU SGEI interrupt handler.
 * Assumes aia_hgei_init() succeeded (hgei_parent_irq is valid).
 */
static void aia_hgei_exit(void)
{
	/* Free per-CPU SGEI interrupt */
	free_percpu_irq(hgei_parent_irq, &aia_hgei);
}
528+
351529void kvm_riscv_aia_enable (void )
352530{
353531 if (!kvm_riscv_aia_available ())
@@ -362,21 +540,82 @@ void kvm_riscv_aia_enable(void)
362540 csr_write (CSR_HVIPRIO1H , 0x0 );
363541 csr_write (CSR_HVIPRIO2H , 0x0 );
364542#endif
543+
544+ /* Enable per-CPU SGEI interrupt */
545+ enable_percpu_irq (hgei_parent_irq ,
546+ irq_get_trigger_type (hgei_parent_irq ));
547+ csr_set (CSR_HIE , BIT (IRQ_S_GEXT ));
365548}
366549
/*
 * Disable AIA on the current CPU: mask the SGEI interrupt, reset
 * hvictl, and detach every VCPU that owns an HGEI line here so it can
 * be rescheduled on another HART.
 */
void kvm_riscv_aia_disable(void)
{
	int i;
	unsigned long flags;
	struct kvm_vcpu *vcpu;
	struct aia_hgei_control *hgctrl;

	if (!kvm_riscv_aia_available())
		return;
	hgctrl = get_cpu_ptr(&aia_hgei);

	/* Disable per-CPU SGEI interrupt */
	csr_clear(CSR_HIE, BIT(IRQ_S_GEXT));
	disable_percpu_irq(hgei_parent_irq);

	aia_set_hvictl(false);

	raw_spin_lock_irqsave(&hgctrl->lock, flags);

	/* Walk all lines (index 0 is reserved and never owned, so skipped) */
	for (i = 0; i <= kvm_riscv_aia_nr_hgei; i++) {
		vcpu = hgctrl->owners[i];
		if (!vcpu)
			continue;

		/*
		 * We release hgctrl->lock before notifying IMSIC
		 * so that we don't have lock ordering issues.
		 */
		raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

		/* Notify IMSIC */
		kvm_riscv_vcpu_aia_imsic_release(vcpu);

		/*
		 * Wakeup VCPU if it was blocked so that it can
		 * run on other HARTs
		 */
		if (csr_read(CSR_HGEIE) & BIT(i)) {
			csr_clear(CSR_HGEIE, BIT(i));
			kvm_vcpu_kick(vcpu);
		}

		/* Re-acquire before examining the next owners[] slot */
		raw_spin_lock_irqsave(&hgctrl->lock, flags);
	}

	raw_spin_unlock_irqrestore(&hgctrl->lock, flags);

	put_cpu_ptr(&aia_hgei);
}
374599
375600int kvm_riscv_aia_init (void )
376601{
602+ int rc ;
603+
377604 if (!riscv_isa_extension_available (NULL , SxAIA ))
378605 return - ENODEV ;
379606
607+ /* Figure-out number of bits in HGEIE */
608+ csr_write (CSR_HGEIE , -1UL );
609+ kvm_riscv_aia_nr_hgei = fls_long (csr_read (CSR_HGEIE ));
610+ csr_write (CSR_HGEIE , 0 );
611+ if (kvm_riscv_aia_nr_hgei )
612+ kvm_riscv_aia_nr_hgei -- ;
613+
614+ /* Initialize guest external interrupt line management */
615+ rc = aia_hgei_init ();
616+ if (rc )
617+ return rc ;
618+
380619 /* Enable KVM AIA support */
381620 static_branch_enable (& kvm_riscv_aia_available );
382621
@@ -385,4 +624,9 @@ int kvm_riscv_aia_init(void)
385624
/*
 * Module-exit teardown: release the HGEI/SGEI resources acquired by
 * kvm_riscv_aia_init(). No-op when AIA was never enabled.
 */
void kvm_riscv_aia_exit(void)
{
	/* Cleanup the HGEI state only if AIA was actually enabled */
	if (kvm_riscv_aia_available())
		aia_hgei_exit();
}
0 commit comments