Skip to content

Commit 9408ace

Browse files
committed
ALSA: memalloc: Drop Xen PV workaround again
Recently, in commit e469e20 ("ALSA: memalloc: Let IOMMU handle S/G primarily"), the SG buffer allocation code was modified to use the standard DMA code primarily, with the fallback applied only in limited cases. This made the Xen PV specific workarounds we took in the commit 53466eb ("ALSA: memalloc: Workaround for Xen PV") rather superfluous. It was a hackish workaround for the regression at that time, and it seems that it's causing other issues (reportedly memory corruption). So it's better to clean it up, after all. Link: https://lore.kernel.org/20240906184209.25423-1-ariadne@ariadne.space Cc: Ariadne Conill <ariadne@ariadne.space> Link: https://patch.msgid.link/20240910113100.32542-1-tiwai@suse.de Signed-off-by: Takashi Iwai <tiwai@suse.de>
1 parent 0ccbc99 commit 9408ace

1 file changed

Lines changed: 3 additions & 16 deletions

File tree

sound/core/memalloc.c

Lines changed: 3 additions & 16 deletions
Original file line number | Diff line number | Diff line change
@@ -667,7 +667,6 @@ static const struct snd_malloc_ops snd_dma_noncontig_ops = {
667667
#ifdef CONFIG_SND_DMA_SGBUF
668668
/* Fallback SG-buffer allocations for x86 */
669669
struct snd_dma_sg_fallback {
670-
bool use_dma_alloc_coherent;
671670
size_t count;
672671
struct page **pages;
673672
/* DMA address array; the first page contains #pages in ~PAGE_MASK */
@@ -687,13 +686,8 @@ static void __snd_dma_sg_fallback_free(struct snd_dma_buffer *dmab,
687686
size = sgbuf->addrs[i] & ~PAGE_MASK;
688687
if (WARN_ON(!size))
689688
break;
690-
if (sgbuf->use_dma_alloc_coherent)
691-
dma_free_coherent(dmab->dev.dev, size << PAGE_SHIFT,
692-
page_address(sgbuf->pages[i]),
693-
sgbuf->addrs[i] & PAGE_MASK);
694-
else
695-
do_free_pages(page_address(sgbuf->pages[i]),
696-
size << PAGE_SHIFT, false);
689+
do_free_pages(page_address(sgbuf->pages[i]),
690+
size << PAGE_SHIFT, false);
697691
i += size;
698692
}
699693
}
@@ -715,7 +709,6 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
715709
sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
716710
if (!sgbuf)
717711
return NULL;
718-
sgbuf->use_dma_alloc_coherent = cpu_feature_enabled(X86_FEATURE_XENPV);
719712
size = PAGE_ALIGN(size);
720713
sgbuf->count = size >> PAGE_SHIFT;
721714
sgbuf->pages = kvcalloc(sgbuf->count, sizeof(*sgbuf->pages), GFP_KERNEL);
@@ -728,10 +721,7 @@ static void *snd_dma_sg_fallback_alloc(struct snd_dma_buffer *dmab, size_t size)
728721
chunk = (PAGE_SIZE - 1) << PAGE_SHIFT; /* to fit in low bits in addrs */
729722
while (size > 0) {
730723
chunk = min(size, chunk);
731-
if (sgbuf->use_dma_alloc_coherent)
732-
p = dma_alloc_coherent(dmab->dev.dev, chunk, &addr, DEFAULT_GFP);
733-
else
734-
p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
724+
p = do_alloc_pages(dmab->dev.dev, chunk, &addr, false);
735725
if (!p) {
736726
if (chunk <= PAGE_SIZE)
737727
goto error;
@@ -803,9 +793,6 @@ static void *snd_dma_sg_alloc(struct snd_dma_buffer *dmab, size_t size)
803793
int type = dmab->dev.type;
804794
void *p;
805795

806-
if (cpu_feature_enabled(X86_FEATURE_XENPV))
807-
return snd_dma_sg_fallback_alloc(dmab, size);
808-
809796
/* try the standard DMA API allocation at first */
810797
if (type == SNDRV_DMA_TYPE_DEV_WC_SG)
811798
dmab->dev.type = SNDRV_DMA_TYPE_DEV_WC;

0 commit comments

Comments (0)