Skip to content

Commit e6b160b

Browse files
committed
Merge tag 'for-linus-6.2-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip
Pull xen updates from Juergen Gross:

 - fix memory leaks in error paths

 - add support for virtio PCI-devices in Xen guests on ARM

 - two minor fixes

* tag 'for-linus-6.2-rc1-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/xen/tip:
  xen/privcmd: Fix a possible warning in privcmd_ioctl_mmap_resource()
  x86/xen: Fix memory leak in xen_init_lock_cpu()
  x86/xen: Fix memory leak in xen_smp_intr_init{_pv}()
  xen: fix xen.h build for CONFIG_XEN_PVH=y
  xen/virtio: Handle PCI devices which Host controller is described in DT
  xen/virtio: Optimize the setup of "xen-grant-dma" devices
2 parents: 0ec5a38 + 8b997b2 · commit e6b160b

9 files changed

Lines changed: 84 additions & 91 deletions

File tree

arch/arm/xen/enlighten.c

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -445,7 +445,7 @@ static int __init xen_guest_init(void)
445445
return 0;
446446

447447
if (IS_ENABLED(CONFIG_XEN_VIRTIO))
448-
virtio_set_mem_acc_cb(xen_virtio_mem_acc);
448+
virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);
449449

450450
if (!acpi_disabled)
451451
xen_acpi_guest_init();

arch/x86/xen/smp.c

Lines changed: 12 additions & 12 deletions
Original file line number · Diff line number · Diff line change
@@ -32,30 +32,30 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
3232

3333
void xen_smp_intr_free(unsigned int cpu)
3434
{
35+
kfree(per_cpu(xen_resched_irq, cpu).name);
36+
per_cpu(xen_resched_irq, cpu).name = NULL;
3537
if (per_cpu(xen_resched_irq, cpu).irq >= 0) {
3638
unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL);
3739
per_cpu(xen_resched_irq, cpu).irq = -1;
38-
kfree(per_cpu(xen_resched_irq, cpu).name);
39-
per_cpu(xen_resched_irq, cpu).name = NULL;
4040
}
41+
kfree(per_cpu(xen_callfunc_irq, cpu).name);
42+
per_cpu(xen_callfunc_irq, cpu).name = NULL;
4143
if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) {
4244
unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL);
4345
per_cpu(xen_callfunc_irq, cpu).irq = -1;
44-
kfree(per_cpu(xen_callfunc_irq, cpu).name);
45-
per_cpu(xen_callfunc_irq, cpu).name = NULL;
4646
}
47+
kfree(per_cpu(xen_debug_irq, cpu).name);
48+
per_cpu(xen_debug_irq, cpu).name = NULL;
4749
if (per_cpu(xen_debug_irq, cpu).irq >= 0) {
4850
unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL);
4951
per_cpu(xen_debug_irq, cpu).irq = -1;
50-
kfree(per_cpu(xen_debug_irq, cpu).name);
51-
per_cpu(xen_debug_irq, cpu).name = NULL;
5252
}
53+
kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
54+
per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
5355
if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) {
5456
unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq,
5557
NULL);
5658
per_cpu(xen_callfuncsingle_irq, cpu).irq = -1;
57-
kfree(per_cpu(xen_callfuncsingle_irq, cpu).name);
58-
per_cpu(xen_callfuncsingle_irq, cpu).name = NULL;
5959
}
6060
}
6161

@@ -65,6 +65,7 @@ int xen_smp_intr_init(unsigned int cpu)
6565
char *resched_name, *callfunc_name, *debug_name;
6666

6767
resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
68+
per_cpu(xen_resched_irq, cpu).name = resched_name;
6869
rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
6970
cpu,
7071
xen_reschedule_interrupt,
@@ -74,9 +75,9 @@ int xen_smp_intr_init(unsigned int cpu)
7475
if (rc < 0)
7576
goto fail;
7677
per_cpu(xen_resched_irq, cpu).irq = rc;
77-
per_cpu(xen_resched_irq, cpu).name = resched_name;
7878

7979
callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
80+
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
8081
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
8182
cpu,
8283
xen_call_function_interrupt,
@@ -86,21 +87,21 @@ int xen_smp_intr_init(unsigned int cpu)
8687
if (rc < 0)
8788
goto fail;
8889
per_cpu(xen_callfunc_irq, cpu).irq = rc;
89-
per_cpu(xen_callfunc_irq, cpu).name = callfunc_name;
9090

9191
if (!xen_fifo_events) {
9292
debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
93+
per_cpu(xen_debug_irq, cpu).name = debug_name;
9394
rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu,
9495
xen_debug_interrupt,
9596
IRQF_PERCPU | IRQF_NOBALANCING,
9697
debug_name, NULL);
9798
if (rc < 0)
9899
goto fail;
99100
per_cpu(xen_debug_irq, cpu).irq = rc;
100-
per_cpu(xen_debug_irq, cpu).name = debug_name;
101101
}
102102

103103
callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
104+
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
104105
rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
105106
cpu,
106107
xen_call_function_single_interrupt,
@@ -110,7 +111,6 @@ int xen_smp_intr_init(unsigned int cpu)
110111
if (rc < 0)
111112
goto fail;
112113
per_cpu(xen_callfuncsingle_irq, cpu).irq = rc;
113-
per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name;
114114

115115
return 0;
116116

arch/x86/xen/smp_pv.c

Lines changed: 6 additions & 6 deletions
Original file line number · Diff line number · Diff line change
@@ -97,18 +97,18 @@ asmlinkage __visible void cpu_bringup_and_idle(void)
9797

9898
void xen_smp_intr_free_pv(unsigned int cpu)
9999
{
100+
kfree(per_cpu(xen_irq_work, cpu).name);
101+
per_cpu(xen_irq_work, cpu).name = NULL;
100102
if (per_cpu(xen_irq_work, cpu).irq >= 0) {
101103
unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
102104
per_cpu(xen_irq_work, cpu).irq = -1;
103-
kfree(per_cpu(xen_irq_work, cpu).name);
104-
per_cpu(xen_irq_work, cpu).name = NULL;
105105
}
106106

107+
kfree(per_cpu(xen_pmu_irq, cpu).name);
108+
per_cpu(xen_pmu_irq, cpu).name = NULL;
107109
if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
108110
unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
109111
per_cpu(xen_pmu_irq, cpu).irq = -1;
110-
kfree(per_cpu(xen_pmu_irq, cpu).name);
111-
per_cpu(xen_pmu_irq, cpu).name = NULL;
112112
}
113113
}
114114

@@ -118,6 +118,7 @@ int xen_smp_intr_init_pv(unsigned int cpu)
118118
char *callfunc_name, *pmu_name;
119119

120120
callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
121+
per_cpu(xen_irq_work, cpu).name = callfunc_name;
121122
rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
122123
cpu,
123124
xen_irq_work_interrupt,
@@ -127,18 +128,17 @@ int xen_smp_intr_init_pv(unsigned int cpu)
127128
if (rc < 0)
128129
goto fail;
129130
per_cpu(xen_irq_work, cpu).irq = rc;
130-
per_cpu(xen_irq_work, cpu).name = callfunc_name;
131131

132132
if (is_xen_pmu) {
133133
pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
134+
per_cpu(xen_pmu_irq, cpu).name = pmu_name;
134135
rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
135136
xen_pmu_irq_handler,
136137
IRQF_PERCPU|IRQF_NOBALANCING,
137138
pmu_name, NULL);
138139
if (rc < 0)
139140
goto fail;
140141
per_cpu(xen_pmu_irq, cpu).irq = rc;
141-
per_cpu(xen_pmu_irq, cpu).name = pmu_name;
142142
}
143143

144144
return 0;

arch/x86/xen/spinlock.c

Lines changed: 3 additions & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -75,6 +75,7 @@ void xen_init_lock_cpu(int cpu)
7575
cpu, per_cpu(lock_kicker_irq, cpu));
7676

7777
name = kasprintf(GFP_KERNEL, "spinlock%d", cpu);
78+
per_cpu(irq_name, cpu) = name;
7879
irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR,
7980
cpu,
8081
dummy_handler,
@@ -85,7 +86,6 @@ void xen_init_lock_cpu(int cpu)
8586
if (irq >= 0) {
8687
disable_irq(irq); /* make sure it's never delivered */
8788
per_cpu(lock_kicker_irq, cpu) = irq;
88-
per_cpu(irq_name, cpu) = name;
8989
}
9090

9191
printk("cpu %d spinlock event irq %d\n", cpu, irq);
@@ -98,6 +98,8 @@ void xen_uninit_lock_cpu(int cpu)
9898
if (!xen_pvspin)
9999
return;
100100

101+
kfree(per_cpu(irq_name, cpu));
102+
per_cpu(irq_name, cpu) = NULL;
101103
/*
102104
* When booting the kernel with 'mitigations=auto,nosmt', the secondary
103105
* CPUs are not activated, and lock_kicker_irq is not initialized.
@@ -108,8 +110,6 @@ void xen_uninit_lock_cpu(int cpu)
108110

109111
unbind_from_irqhandler(irq, NULL);
110112
per_cpu(lock_kicker_irq, cpu) = -1;
111-
kfree(per_cpu(irq_name, cpu));
112-
per_cpu(irq_name, cpu) = NULL;
113113
}
114114

115115
PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen);

drivers/xen/grant-dma-ops.c

Lines changed: 58 additions & 47 deletions
Original file line number · Diff line number · Diff line change
@@ -10,6 +10,7 @@
1010
#include <linux/module.h>
1111
#include <linux/dma-map-ops.h>
1212
#include <linux/of.h>
13+
#include <linux/pci.h>
1314
#include <linux/pfn.h>
1415
#include <linux/xarray.h>
1516
#include <linux/virtio_anchor.h>
@@ -292,50 +293,48 @@ static const struct dma_map_ops xen_grant_dma_ops = {
292293
.dma_supported = xen_grant_dma_supported,
293294
};
294295

295-
static bool xen_is_dt_grant_dma_device(struct device *dev)
296+
static struct device_node *xen_dt_get_node(struct device *dev)
296297
{
297-
struct device_node *iommu_np;
298-
bool has_iommu;
298+
if (dev_is_pci(dev)) {
299+
struct pci_dev *pdev = to_pci_dev(dev);
300+
struct pci_bus *bus = pdev->bus;
299301

300-
iommu_np = of_parse_phandle(dev->of_node, "iommus", 0);
301-
has_iommu = iommu_np &&
302-
of_device_is_compatible(iommu_np, "xen,grant-dma");
303-
of_node_put(iommu_np);
302+
/* Walk up to the root bus to look for PCI Host controller */
303+
while (!pci_is_root_bus(bus))
304+
bus = bus->parent;
304305

305-
return has_iommu;
306-
}
307-
308-
bool xen_is_grant_dma_device(struct device *dev)
309-
{
310-
/* XXX Handle only DT devices for now */
311-
if (dev->of_node)
312-
return xen_is_dt_grant_dma_device(dev);
313-
314-
return false;
315-
}
316-
317-
bool xen_virtio_mem_acc(struct virtio_device *dev)
318-
{
319-
if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain())
320-
return true;
306+
return of_node_get(bus->bridge->parent->of_node);
307+
}
321308

322-
return xen_is_grant_dma_device(dev->dev.parent);
309+
return of_node_get(dev->of_node);
323310
}
324311

325312
static int xen_dt_grant_init_backend_domid(struct device *dev,
326-
struct xen_grant_dma_data *data)
313+
struct device_node *np,
314+
domid_t *backend_domid)
327315
{
328-
struct of_phandle_args iommu_spec;
316+
struct of_phandle_args iommu_spec = { .args_count = 1 };
329317

330-
if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells",
331-
0, &iommu_spec)) {
332-
dev_err(dev, "Cannot parse iommus property\n");
333-
return -ESRCH;
318+
if (dev_is_pci(dev)) {
319+
struct pci_dev *pdev = to_pci_dev(dev);
320+
u32 rid = PCI_DEVID(pdev->bus->number, pdev->devfn);
321+
322+
if (of_map_id(np, rid, "iommu-map", "iommu-map-mask", &iommu_spec.np,
323+
iommu_spec.args)) {
324+
dev_dbg(dev, "Cannot translate ID\n");
325+
return -ESRCH;
326+
}
327+
} else {
328+
if (of_parse_phandle_with_args(np, "iommus", "#iommu-cells",
329+
0, &iommu_spec)) {
330+
dev_dbg(dev, "Cannot parse iommus property\n");
331+
return -ESRCH;
332+
}
334333
}
335334

336335
if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") ||
337336
iommu_spec.args_count != 1) {
338-
dev_err(dev, "Incompatible IOMMU node\n");
337+
dev_dbg(dev, "Incompatible IOMMU node\n");
339338
of_node_put(iommu_spec.np);
340339
return -ESRCH;
341340
}
@@ -346,12 +345,31 @@ static int xen_dt_grant_init_backend_domid(struct device *dev,
346345
* The endpoint ID here means the ID of the domain where the
347346
* corresponding backend is running
348347
*/
349-
data->backend_domid = iommu_spec.args[0];
348+
*backend_domid = iommu_spec.args[0];
350349

351350
return 0;
352351
}
353352

354-
void xen_grant_setup_dma_ops(struct device *dev)
353+
static int xen_grant_init_backend_domid(struct device *dev,
354+
domid_t *backend_domid)
355+
{
356+
struct device_node *np;
357+
int ret = -ENODEV;
358+
359+
np = xen_dt_get_node(dev);
360+
if (np) {
361+
ret = xen_dt_grant_init_backend_domid(dev, np, backend_domid);
362+
of_node_put(np);
363+
} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT) || xen_pv_domain()) {
364+
dev_info(dev, "Using dom0 as backend\n");
365+
*backend_domid = 0;
366+
ret = 0;
367+
}
368+
369+
return ret;
370+
}
371+
372+
static void xen_grant_setup_dma_ops(struct device *dev, domid_t backend_domid)
355373
{
356374
struct xen_grant_dma_data *data;
357375

@@ -365,16 +383,7 @@ void xen_grant_setup_dma_ops(struct device *dev)
365383
if (!data)
366384
goto err;
367385

368-
if (dev->of_node) {
369-
if (xen_dt_grant_init_backend_domid(dev, data))
370-
goto err;
371-
} else if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) {
372-
dev_info(dev, "Using dom0 as backend\n");
373-
data->backend_domid = 0;
374-
} else {
375-
/* XXX ACPI device unsupported for now */
376-
goto err;
377-
}
386+
data->backend_domid = backend_domid;
378387

379388
if (store_xen_grant_dma_data(dev, data)) {
380389
dev_err(dev, "Cannot store Xen grant DMA data\n");
@@ -392,12 +401,14 @@ void xen_grant_setup_dma_ops(struct device *dev)
392401

393402
bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
394403
{
395-
bool ret = xen_virtio_mem_acc(dev);
404+
domid_t backend_domid;
396405

397-
if (ret)
398-
xen_grant_setup_dma_ops(dev->dev.parent);
406+
if (!xen_grant_init_backend_domid(dev->dev.parent, &backend_domid)) {
407+
xen_grant_setup_dma_ops(dev->dev.parent, backend_domid);
408+
return true;
409+
}
399410

400-
return ret;
411+
return false;
401412
}
402413

403414
MODULE_DESCRIPTION("Xen grant DMA-mapping layer");

drivers/xen/privcmd.c

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -760,7 +760,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
760760
goto out;
761761
}
762762

763-
pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL);
763+
pfns = kcalloc(kdata.num, sizeof(*pfns), GFP_KERNEL | __GFP_NOWARN);
764764
if (!pfns) {
765765
rc = -ENOMEM;
766766
goto out;

include/xen/arm/xen-ops.h

Lines changed: 1 addition & 3 deletions
Original file line number · Diff line number · Diff line change
@@ -8,9 +8,7 @@
88
static inline void xen_setup_dma_ops(struct device *dev)
99
{
1010
#ifdef CONFIG_XEN
11-
if (xen_is_grant_dma_device(dev))
12-
xen_grant_setup_dma_ops(dev);
13-
else if (xen_swiotlb_detect())
11+
if (xen_swiotlb_detect())
1412
dev->dma_ops = &xen_swiotlb_dma_ops;
1513
#endif
1614
}

include/xen/xen-ops.h

Lines changed: 0 additions & 16 deletions
Original file line number · Diff line number · Diff line change
@@ -216,26 +216,10 @@ static inline void xen_preemptible_hcall_end(void) { }
216216
#endif /* CONFIG_XEN_PV && !CONFIG_PREEMPTION */
217217

218218
#ifdef CONFIG_XEN_GRANT_DMA_OPS
219-
void xen_grant_setup_dma_ops(struct device *dev);
220-
bool xen_is_grant_dma_device(struct device *dev);
221-
bool xen_virtio_mem_acc(struct virtio_device *dev);
222219
bool xen_virtio_restricted_mem_acc(struct virtio_device *dev);
223220
#else
224-
static inline void xen_grant_setup_dma_ops(struct device *dev)
225-
{
226-
}
227-
static inline bool xen_is_grant_dma_device(struct device *dev)
228-
{
229-
return false;
230-
}
231-
232221
struct virtio_device;
233222

234-
static inline bool xen_virtio_mem_acc(struct virtio_device *dev)
235-
{
236-
return false;
237-
}
238-
239223
static inline bool xen_virtio_restricted_mem_acc(struct virtio_device *dev)
240224
{
241225
return false;

0 commit comments

Comments (0)