Skip to content

Commit 2647c96

Browse files
romank-msft authored and liuw committed
Drivers: hv: Support establishing the confidential VMBus connection
To establish the confidential VMBus connection in a CoCo VM, the guest first checks whether confidential VMBus is available, and then proceeds to initialize the communication stack. Implement that in the VMBus driver initialization. Signed-off-by: Roman Kisel <romank@linux.microsoft.com> Reviewed-by: Michael Kelley <mhklinux@outlook.com> Signed-off-by: Wei Liu <wei.liu@kernel.org>
1 parent b537794 commit 2647c96

1 file changed

Lines changed: 106 additions & 62 deletions

File tree

drivers/hv/vmbus_drv.c

Lines changed: 106 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -1057,19 +1057,20 @@ static void vmbus_onmessage_work(struct work_struct *work)
10571057
kfree(ctx);
10581058
}
10591059

1060-
void vmbus_on_msg_dpc(unsigned long data)
1060+
static void __vmbus_on_msg_dpc(void *message_page_addr)
10611061
{
1062-
struct hv_per_cpu_context *hv_cpu = (void *)data;
1063-
void *page_addr = hv_cpu->hyp_synic_message_page;
1064-
struct hv_message msg_copy, *msg = (struct hv_message *)page_addr +
1065-
VMBUS_MESSAGE_SINT;
1062+
struct hv_message msg_copy, *msg;
10661063
struct vmbus_channel_message_header *hdr;
10671064
enum vmbus_channel_message_type msgtype;
10681065
const struct vmbus_channel_message_table_entry *entry;
10691066
struct onmessage_work_context *ctx;
10701067
__u8 payload_size;
10711068
u32 message_type;
10721069

1070+
if (!message_page_addr)
1071+
return;
1072+
msg = (struct hv_message *)message_page_addr + VMBUS_MESSAGE_SINT;
1073+
10731074
/*
10741075
* 'enum vmbus_channel_message_type' is supposed to always be 'u32' as
10751076
* it is being used in 'struct vmbus_channel_message_header' definition
@@ -1195,6 +1196,14 @@ void vmbus_on_msg_dpc(unsigned long data)
11951196
vmbus_signal_eom(msg, message_type);
11961197
}
11971198

1199+
void vmbus_on_msg_dpc(unsigned long data)
1200+
{
1201+
struct hv_per_cpu_context *hv_cpu = (void *)data;
1202+
1203+
__vmbus_on_msg_dpc(hv_cpu->hyp_synic_message_page);
1204+
__vmbus_on_msg_dpc(hv_cpu->para_synic_message_page);
1205+
}
1206+
11981207
#ifdef CONFIG_PM_SLEEP
11991208
/*
12001209
* Fake RESCIND_CHANNEL messages to clean up hv_sock channels by force for
@@ -1233,28 +1242,31 @@ static void vmbus_force_channel_rescinded(struct vmbus_channel *channel)
12331242
#endif /* CONFIG_PM_SLEEP */
12341243

12351244
/*
1236-
* Schedule all channels with events pending
1245+
* Schedule all channels with events pending.
1246+
* The event page can be directly checked to get the id of
1247+
* the channel that has the interrupt pending.
12371248
*/
1238-
static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
1249+
static void vmbus_chan_sched(void *event_page_addr)
12391250
{
12401251
unsigned long *recv_int_page;
12411252
u32 maxbits, relid;
1253+
union hv_synic_event_flags *event;
12421254

1243-
/*
1244-
* The event page can be directly checked to get the id of
1245-
* the channel that has the interrupt pending.
1246-
*/
1247-
void *page_addr = hv_cpu->hyp_synic_event_page;
1248-
union hv_synic_event_flags *event
1249-
= (union hv_synic_event_flags *)page_addr +
1250-
VMBUS_MESSAGE_SINT;
1255+
if (!event_page_addr)
1256+
return;
1257+
event = (union hv_synic_event_flags *)event_page_addr + VMBUS_MESSAGE_SINT;
12511258

12521259
maxbits = HV_EVENT_FLAGS_COUNT;
12531260
recv_int_page = event->flags;
12541261

12551262
if (unlikely(!recv_int_page))
12561263
return;
12571264

1265+
/*
1266+
* Suggested-by: Michael Kelley <mhklinux@outlook.com>
1267+
* One possible optimization would be to keep track of the largest relID that's in use,
1268+
* and only scan up to that relID.
1269+
*/
12581270
for_each_set_bit(relid, recv_int_page, maxbits) {
12591271
void (*callback_fn)(void *context);
12601272
struct vmbus_channel *channel;
@@ -1318,26 +1330,35 @@ static void vmbus_chan_sched(struct hv_per_cpu_context *hv_cpu)
13181330
}
13191331
}
13201332

1321-
static void vmbus_isr(void)
1333+
static void vmbus_message_sched(struct hv_per_cpu_context *hv_cpu, void *message_page_addr)
13221334
{
1323-
struct hv_per_cpu_context *hv_cpu
1324-
= this_cpu_ptr(hv_context.cpu_context);
1325-
void *page_addr;
13261335
struct hv_message *msg;
13271336

1328-
vmbus_chan_sched(hv_cpu);
1329-
1330-
page_addr = hv_cpu->hyp_synic_message_page;
1331-
msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
1337+
if (!message_page_addr)
1338+
return;
1339+
msg = (struct hv_message *)message_page_addr + VMBUS_MESSAGE_SINT;
13321340

13331341
/* Check if there are actual msgs to be processed */
13341342
if (msg->header.message_type != HVMSG_NONE) {
13351343
if (msg->header.message_type == HVMSG_TIMER_EXPIRED) {
13361344
hv_stimer0_isr();
13371345
vmbus_signal_eom(msg, HVMSG_TIMER_EXPIRED);
1338-
} else
1346+
} else {
13391347
tasklet_schedule(&hv_cpu->msg_dpc);
1348+
}
13401349
}
1350+
}
1351+
1352+
static void vmbus_isr(void)
1353+
{
1354+
struct hv_per_cpu_context *hv_cpu
1355+
= this_cpu_ptr(hv_context.cpu_context);
1356+
1357+
vmbus_chan_sched(hv_cpu->hyp_synic_event_page);
1358+
vmbus_chan_sched(hv_cpu->para_synic_event_page);
1359+
1360+
vmbus_message_sched(hv_cpu, hv_cpu->hyp_synic_message_page);
1361+
vmbus_message_sched(hv_cpu, hv_cpu->para_synic_message_page);
13411362

13421363
add_interrupt_randomness(vmbus_interrupt);
13431364
}
@@ -1355,6 +1376,59 @@ static void vmbus_percpu_work(struct work_struct *work)
13551376
hv_synic_init(cpu);
13561377
}
13571378

1379+
static int vmbus_alloc_synic_and_connect(void)
1380+
{
1381+
int ret, cpu;
1382+
struct work_struct __percpu *works;
1383+
int hyperv_cpuhp_online;
1384+
1385+
ret = hv_synic_alloc();
1386+
if (ret < 0)
1387+
goto err_alloc;
1388+
1389+
works = alloc_percpu(struct work_struct);
1390+
if (!works) {
1391+
ret = -ENOMEM;
1392+
goto err_alloc;
1393+
}
1394+
1395+
/*
1396+
* Initialize the per-cpu interrupt state and stimer state.
1397+
* Then connect to the host.
1398+
*/
1399+
cpus_read_lock();
1400+
for_each_online_cpu(cpu) {
1401+
struct work_struct *work = per_cpu_ptr(works, cpu);
1402+
1403+
INIT_WORK(work, vmbus_percpu_work);
1404+
schedule_work_on(cpu, work);
1405+
}
1406+
1407+
for_each_online_cpu(cpu)
1408+
flush_work(per_cpu_ptr(works, cpu));
1409+
1410+
/* Register the callbacks for possible CPU online/offline'ing */
1411+
ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
1412+
hv_synic_init, hv_synic_cleanup);
1413+
cpus_read_unlock();
1414+
free_percpu(works);
1415+
if (ret < 0)
1416+
goto err_alloc;
1417+
hyperv_cpuhp_online = ret;
1418+
1419+
ret = vmbus_connect();
1420+
if (ret)
1421+
goto err_connect;
1422+
return 0;
1423+
1424+
err_connect:
1425+
cpuhp_remove_state(hyperv_cpuhp_online);
1426+
return -ENODEV;
1427+
err_alloc:
1428+
hv_synic_free();
1429+
return -ENOMEM;
1430+
}
1431+
13581432
/*
13591433
* vmbus_bus_init -Main vmbus driver initialization routine.
13601434
*
@@ -1365,8 +1439,7 @@ static void vmbus_percpu_work(struct work_struct *work)
13651439
*/
13661440
static int vmbus_bus_init(void)
13671441
{
1368-
int ret, cpu;
1369-
struct work_struct __percpu *works;
1442+
int ret;
13701443

13711444
ret = hv_init();
13721445
if (ret != 0) {
@@ -1401,41 +1474,15 @@ static int vmbus_bus_init(void)
14011474
}
14021475
}
14031476

1404-
ret = hv_synic_alloc();
1405-
if (ret)
1406-
goto err_alloc;
1407-
1408-
works = alloc_percpu(struct work_struct);
1409-
if (!works) {
1410-
ret = -ENOMEM;
1411-
goto err_alloc;
1412-
}
1413-
14141477
/*
1415-
* Initialize the per-cpu interrupt state and stimer state.
1416-
* Then connect to the host.
1478+
* Cache the value as getting it involves a VM exit on x86(_64), and
1479+
* doing that on each VP while initializing SynIC's wastes time.
14171480
*/
1418-
cpus_read_lock();
1419-
for_each_online_cpu(cpu) {
1420-
struct work_struct *work = per_cpu_ptr(works, cpu);
1421-
1422-
INIT_WORK(work, vmbus_percpu_work);
1423-
schedule_work_on(cpu, work);
1424-
}
1425-
1426-
for_each_online_cpu(cpu)
1427-
flush_work(per_cpu_ptr(works, cpu));
1428-
1429-
/* Register the callbacks for possible CPU online/offline'ing */
1430-
ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN, "hyperv/vmbus:online",
1431-
hv_synic_init, hv_synic_cleanup);
1432-
cpus_read_unlock();
1433-
free_percpu(works);
1434-
if (ret < 0)
1435-
goto err_alloc;
1436-
hyperv_cpuhp_online = ret;
1437-
1438-
ret = vmbus_connect();
1481+
is_confidential = ms_hyperv.confidential_vmbus_available;
1482+
if (is_confidential)
1483+
pr_info("Establishing connection to the confidential VMBus\n");
1484+
hv_para_set_sint_proxy(!is_confidential);
1485+
ret = vmbus_alloc_synic_and_connect();
14391486
if (ret)
14401487
goto err_connect;
14411488

@@ -1451,9 +1498,6 @@ static int vmbus_bus_init(void)
14511498
return 0;
14521499

14531500
err_connect:
1454-
cpuhp_remove_state(hyperv_cpuhp_online);
1455-
err_alloc:
1456-
hv_synic_free();
14571501
if (vmbus_irq == -1) {
14581502
hv_remove_vmbus_handler();
14591503
} else {

0 commit comments

Comments
 (0)